From eee8882e318cb267d2949cd475c831a6336daa36 Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Wed, 23 Aug 2017 17:04:18 -0700 Subject: [PATCH 01/13] Moved Filter.g and ExpressionTree. As part of this I had to move PARTITION_DATE_FORMAT from HiveMetaStore to MetaStoreUtils. Without this the move would have had to wait until I moved HiveMetaStore. But this caused a cascade of other things to have to wait, so that I would have ended up with one huge patch that moves everything. --- metastore/pom.xml | 14 -------------- .../hadoop/hive/metastore/HiveMetaStore.java | 11 ----------- .../hive/metastore/MetaStoreDirectSql.java | 2 +- .../hive/ql/parse/BaseSemanticAnalyzer.java | 4 ++-- standalone-metastore/pom.xml | 19 +++++++++++++++++++ .../hive/metastore/parser/ExpressionTree.java | 18 +++++++++--------- .../hadoop/hive/metastore/parser/Filter.g | 0 .../hive/metastore/parser/package-info.java | 0 .../hive/metastore/utils/MetaStoreUtils.java | 13 +++++++++++++ 9 files changed, 44 insertions(+), 37 deletions(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (97%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/parser/Filter.g (100%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/parser/package-info.java (100%) diff --git a/metastore/pom.xml b/metastore/pom.xml index 5430580749ab..04c6f47879a4 100644 --- a/metastore/pom.xml +++ b/metastore/pom.xml @@ -267,20 +267,6 @@ ${basedir}/src/test - - org.antlr - antlr3-maven-plugin - - - - antlr - - - - - ${basedir}/src/java - - org.apache.maven.plugins maven-jar-plugin diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 6d789fba58c4..8d30b8fb8447 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -187,17 +187,6 @@ public class HiveMetaStore extends ThriftHiveMetastore { @VisibleForTesting static long TEST_TIMEOUT_VALUE = -1; - /** A fixed date format to be used for hive partition column values. */ - public static final ThreadLocal PARTITION_DATE_FORMAT = - new ThreadLocal() { - @Override - protected DateFormat initialValue() { - DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); - val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. - return val; - }; - }; - /** * default port on which to start the Hive server */ diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 9c51d8e1fa7f..d642622bfc56 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -1168,7 +1168,7 @@ public void visit(LeafNode node) throws MetaException { // Filter.g cannot parse a quoted date; try to parse date here too. 
try { nodeValue = new java.sql.Date( - HiveMetaStore.PARTITION_DATE_FORMAT.get().parse((String)nodeValue).getTime()); + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.PARTITION_DATE_FORMAT.get().parse((String)nodeValue).getTime()); valType = FilterType.Date; } catch (ParseException pe) { // do nothing, handled below - types will mismatch } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 06e00d723085..fc7b89437e39 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; @@ -50,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -1752,7 +1752,7 @@ private static String normalizeDateCol( } else { throw new SemanticException("Unexpected date type " + colValue.getClass()); } - return HiveMetaStore.PARTITION_DATE_FORMAT.get().format(value); + return MetaStoreUtils.PARTITION_DATE_FORMAT.get().format(value); } protected WriteEntity toWriteEntity(String location) throws SemanticException { diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index acc50ca4302a..f792e38807a6 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -77,6 +77,11 @@ metrics-json ${dropwizard.version} + + org.antlr + antlr-runtime + ${antlr.version} + org.apache.commons commons-lang3 @@ -283,6 +288,20 @@ + + org.antlr + antlr3-maven-plugin + + + + antlr + + + + + ${basedir}/src/main/java + + org.apache.maven.plugins maven-antrun-plugin diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java similarity index 97% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index 12773ac9e972..d608e50b0d32 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,6 @@ import org.antlr.runtime.CharStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.ColumnType; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; @@ -36,6 +34,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * The Class representing the filter as a binary tree. The tree has TreeNode's @@ -67,13 +67,13 @@ public enum Operator { private final String sqlOp; // private constructor - private Operator(String op){ + Operator(String op){ this.op = op; this.jdoOp = op; this.sqlOp = op; } - private Operator(String op, String jdoOp, String sqlOp){ + Operator(String op, String jdoOp, String sqlOp){ this.op = op; this.jdoOp = jdoOp; this.sqlOp = sqlOp; @@ -351,7 +351,7 @@ private void generateJDOFilterOverPartitions(Configuration conf, Table table, if (filterBuilder.hasError()) return; boolean canPushDownIntegral = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN); + MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.INTEGER_JDO_PUSHDOWN); String valueAsString = getJdoFilterPushdownParam( table, partitionColumnIndex, filterBuilder, canPushDownIntegral); if (filterBuilder.hasError()) return; @@ -443,7 +443,7 @@ private String getJdoFilterPushdownParam(Table table, int partColIndex, // columns have been excluded above, so it will either compare w/string or fail. Object val = value; if (value instanceof Date) { - val = HiveMetaStore.PARTITION_DATE_FORMAT.get().format((Date)value); + val = MetaStoreUtils.PARTITION_DATE_FORMAT.get().format((Date)value); } boolean isStringValue = val instanceof String; if (!isStringValue && (!isIntegralSupported || !(val instanceof Long))) { @@ -487,7 +487,7 @@ public void accept(TreeVisitor treeVisitor) throws MetaException { private static void makeFilterForEquals(String keyName, String value, String paramName, Map params, int keyPos, int keyCount, boolean isEq, FilterBuilder fltr) throws MetaException { - Map partKeyToVal = new HashMap(); + Map partKeyToVal = new HashMap<>(); partKeyToVal.put(keyName, value); // If a partition has multiple partition keys, we make the assumption that // makePartName with one key will return a substring of the name made @@ -526,7 +526,7 @@ private static void makeFilterForEquals(String keyName, String value, String par /** * The node stack used to keep track of the tree nodes during parsing. 
*/ - private final Stack nodeStack = new Stack(); + private final Stack nodeStack = new Stack<>(); public TreeNode getRoot() { return this.root; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/Filter.g similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/Filter.g diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/package-info.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/package-info.java diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 37fc56bda888..3c2e1a1ba4bc 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -21,7 +21,20 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.text.DateFormat; +import java.text.SimpleDateFormat; + public class MetaStoreUtils { + /** A fixed date format to be used for hive partition column values. */ + public static final ThreadLocal PARTITION_DATE_FORMAT = + new ThreadLocal() { + @Override + protected DateFormat initialValue() { + DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); + val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. + return val; + }; + }; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); /** From 9682efabf2eb82810da8260277105ee11b3a100c Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Thu, 24 Aug 2017 15:53:20 -0700 Subject: [PATCH 02/13] Moved NDV classes in prep for moving Stats mergers. 
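The NDV classes moved by this patch (NumDistinctValueEstimator, NumDistinctValueEstimatorFactory, FMSketch, and the HyperLogLog register/utility classes) all sit behind the NumDistinctValueEstimator interface whose signatures appear in the hunks below. A minimal usage sketch, pieced together from the builder and add calls visible in the relocated tests; that HyperLogLog exposes the interface's estimateNumDistinctValues() and serialize() directly is an assumption here, not something this patch states:

    // Sketch: build the HyperLogLog-based NDV estimator, feed it values, and
    // read back an approximate distinct count plus its serialized form.
    import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;

    public class NdvEstimatorSketch {
      public static void main(String[] args) {
        HyperLogLog hll = HyperLogLog.builder().build();      // defaults, as in the moved tests
        for (long v = 0; v < 100_000; v++) {
          hll.addLong(v % 50_000);                             // roughly 50k distinct inputs
        }
        long ndv = hll.estimateNumDistinctValues();            // approximate, not exact
        byte[] serialized = hll.serialize();                   // what the metastore would persist
        System.out.println("estimated NDV = " + ndv + ", bytes = " + serialized.length);
      }
    }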
--- standalone-metastore/pom.xml | 5 ++++ .../common/ndv/NumDistinctValueEstimator.java | 26 +++++++++---------- .../ndv/NumDistinctValueEstimatorFactory.java | 2 +- .../hadoop/hive/common/ndv/fm/FMSketch.java | 6 ++--- .../hive/common/ndv/fm/FMSketchUtils.java | 2 +- .../hive/common/ndv/hll/HLLConstants.java | 2 +- .../hive/common/ndv/hll/HLLDenseRegister.java | 2 +- .../hive/common/ndv/hll/HLLRegister.java | 8 +++--- .../common/ndv/hll/HLLSparseRegister.java | 4 +-- .../hive/common/ndv/hll/HyperLogLog.java | 2 +- .../hive/common/ndv/hll/HyperLogLogUtils.java | 2 +- .../ndv/fm/TestFMSketchSerialization.java | 3 ++- .../hive/common/ndv/hll/TestHLLNoBias.java | 6 ++--- .../common/ndv/hll/TestHLLSerialization.java | 10 +++---- .../hive/common/ndv/hll/TestHyperLogLog.java | 2 +- .../common/ndv/hll/TestHyperLogLogDense.java | 4 +-- .../common/ndv/hll/TestHyperLogLogSparse.java | 4 +-- .../common/ndv/hll/TestSparseEncodeHash.java | 2 +- 18 files changed, 49 insertions(+), 43 deletions(-) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java (64%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java (98%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java (93%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java (99%) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java (99%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java (98%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java (97%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java (98%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java (99%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java (98%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java (98%) rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java (99%) diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index f792e38807a6..c222e8c5e49d 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -77,6 +77,11 @@ metrics-json ${dropwizard.version} + + javolution + javolution + ${javolution.version} + org.antlr antlr-runtime diff --git 
a/common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java similarity index 64% rename from common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java index ed0db14b9d28..668db105765f 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,28 +24,28 @@ public interface NumDistinctValueEstimator { - static final Logger LOG = LoggerFactory.getLogger(NumDistinctValueEstimator.class.getName()); + Logger LOG = LoggerFactory.getLogger(NumDistinctValueEstimator.class.getName()); - public void reset(); + void reset(); - public byte[] serialize(); + byte[] serialize(); - public NumDistinctValueEstimator deserialize(byte[] buf); + NumDistinctValueEstimator deserialize(byte[] buf); - public void addToEstimator(long v); + void addToEstimator(long v); - public void addToEstimator(double d); + void addToEstimator(double d); - public void addToEstimator(String s); + void addToEstimator(String s); - public void addToEstimator(HiveDecimal decimal); + void addToEstimator(HiveDecimal decimal); - public void mergeEstimators(NumDistinctValueEstimator o); + void mergeEstimators(NumDistinctValueEstimator o); - public long estimateNumDistinctValues(); + long estimateNumDistinctValues(); - public int lengthFor(JavaDataModel model); + int lengthFor(JavaDataModel model); - public boolean canMerge(NumDistinctValueEstimator o); + boolean canMerge(NumDistinctValueEstimator o); } diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java index ca9075914fc2..4e4dfb7a21eb 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java similarity index 98% rename from common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java index 36a49c226d36..f6cdc4ce8eb5 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,7 @@ import java.io.InputStream; import java.util.Random; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.util.JavaDataModel; @@ -313,7 +313,7 @@ public long estimateNumDistinctValues() { return ((long)(numDistinctValues)); } - @InterfaceAudience.LimitedPrivate(value = { "Hive" }) + @InterfaceAudience.LimitedPrivate(value = {"Hive" }) static int lengthFor(JavaDataModel model, Integer numVector) { int length = model.object(); length += model.primitive1() * 2; // two int diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java index 01506784472b..02c64b832115 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java index ded8edd93aa5..f30750fee46a 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java index 00cb039db1a8..c52746e61faa 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java similarity index 93% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java index eefc60fbd65d..a90094db432d 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ public interface HLLRegister { * - hashcode to add * @return true if register value is updated else false */ - public boolean add(long hashcode); + boolean add(long hashcode); /** * Instead of specifying hashcode, this interface can be used to directly @@ -39,12 +39,12 @@ public interface HLLRegister { * - register value * @return true if register value is updated else false */ - public boolean set(int idx, byte value); + boolean set(int idx, byte value); /** * Merge hyperloglog registers of the same type (SPARSE or DENSE register) * @param reg * - register to be merged */ - public void merge(HLLRegister reg); + void merge(HLLRegister reg); } diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java index a4a5ba90151f..82085dd05668 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class HLLSparseRegister implements HLLRegister { public HLLSparseRegister(int p, int pp, int qp) { this.p = p; - this.sparseMap = new TreeMap(); + this.sparseMap = new TreeMap<>(); this.tempList = new int[HLLConstants.TEMP_LIST_DEFAULT_SIZE]; this.tempListIdx = 0; this.pPrime = pp; diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java index b80a0ac3ed6a..8bdb47b431f0 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java index 2d82bd08e69c..4e6510b7fa8c 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java index 377f9c7920a2..28f074de8c75 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,6 +24,7 @@ import javolution.util.FastBitSet; +import org.apache.hadoop.hive.common.ndv.FMSketch; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; import org.junit.Test; diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java similarity index 97% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java index 30f5ca3e613c..f09cb8ce15f5 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); @@ -100,7 +100,7 @@ public void testHLLNoBiasDisabledHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().enableNoBias(false).build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java index b4b8df117481..08955d73b671 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -97,7 +97,7 @@ public void testHLLSparseSerialization() throws IOException { public void testHLLSparseSerializationHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.SPARSE).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -146,7 +146,7 @@ public void testHLLSparseNoBitPackingHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.SPARSE) .enableBitPacking(false).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -193,7 +193,7 @@ public void testHLLDenseSerialization() throws IOException { public void testHLLDenseSerializationHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.DENSE).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -242,7 +242,7 @@ public void testHLLDenseNoBitPackingHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.DENSE).enableBitPacking(false) .build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java index 635073fc26e6..b0eaad801273 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java index 00fd785b6ff6..106a9ed1e518 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().setEncoding(HyperLogLog.EncodingType.DENSE).build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java index cfa58868e59c..50c7ea135525 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java index 2c7e89b5e6d8..bcabe9511cca 100644 --- a/common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information From 7fd7cc6df689a022405d0aaa7eb2e9e9d10e872d Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Thu, 24 Aug 2017 16:12:36 -0700 Subject: [PATCH 03/13] Moved ColumnStatsMerger and implementors. 
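Each merger moved below folds one partition's statistics for a single column type into a running aggregate. As a rough illustration of the kind of work involved, using only the public Thrift stats objects; the real LongColumnStatsMerger also merges NDV bit vectors/estimators, and the method shape shown here is an assumption rather than a copy of the moved code:

    // Illustrative sketch: merge one partition's long-column stats into an aggregate.
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

    public class LongStatsMergeSketch {
      public static void merge(ColumnStatisticsObj aggregate, ColumnStatisticsObj next) {
        LongColumnStatsData agg = aggregate.getStatsData().getLongStats();
        LongColumnStatsData add = next.getStatsData().getLongStats();
        agg.setLowValue(Math.min(agg.getLowValue(), add.getLowValue()));     // widen the value range
        agg.setHighValue(Math.max(agg.getHighValue(), add.getHighValue()));
        agg.setNumNulls(agg.getNumNulls() + add.getNumNulls());              // null counts add up
        agg.setNumDVs(Math.max(agg.getNumDVs(), add.getNumDVs()));           // crude NDV lower bound
      }
    }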
--- .../metastore/columnstats/merge/BinaryColumnStatsMerger.java | 2 +- .../metastore/columnstats/merge/BooleanColumnStatsMerger.java | 2 +- .../hive/metastore/columnstats/merge/ColumnStatsMerger.java | 2 +- .../metastore/columnstats/merge/ColumnStatsMergerFactory.java | 2 +- .../hive/metastore/columnstats/merge/DateColumnStatsMerger.java | 2 +- .../metastore/columnstats/merge/DecimalColumnStatsMerger.java | 2 +- .../metastore/columnstats/merge/DoubleColumnStatsMerger.java | 2 +- .../hive/metastore/columnstats/merge/LongColumnStatsMerger.java | 2 +- .../metastore/columnstats/merge/StringColumnStatsMerger.java | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java (99%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java index 4c2d1bc60223..1c2402f564c2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java index 8e5015323f21..fd6b87aa670c 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java index 474d4ddcd10b..ce557565c452 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java index 66be52413956..1a2d38e556c8 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java index e783d3c345f2..5baebbb47b93 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java index 54099f64c20e..01f3385d70e0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java index 817a55dc5b22..6a957518154b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java index dc048e0194a0..ca1a91205290 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java index e353b8f70c4a..d6b4478ec8ee 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information From 42af983cb5e3afd2b84ee9b6411ce9ecc8b96b9f Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Thu, 24 Aug 2017 17:08:34 -0700 Subject: [PATCH 04/13] Moved MetaStoreDirectSql, StatObjectConverter, and ColumnStatsAggregator and implementations. Combined all of these together because there was a circular dependence. 
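Beyond the physical move, the MetaStoreDirectSql changes below also swap its configuration reads from HiveConf over to the standalone MetastoreConf, part of breaking the dependency on the old metastore/ql modules. A compressed view of that pattern, using only the call and imports that appear verbatim in the hunks below (the removed form was HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE)):

    // The standalone configuration lookup MetaStoreDirectSql now uses.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class DirectSqlConfLookup {
      static int partitionBatchSize(Configuration conf) {
        return MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_PARTITION_BATCH_SIZE);
      }
    }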
--- .../hadoop/hive/metastore/MetaStoreUtils.java | 29 +--- .../hadoop/hive/metastore/Deadline.java | 5 +- .../hive/metastore/DeadlineException.java | 0 .../hive/metastore/MetaStoreDirectSql.java | 19 +-- .../hive/metastore/StatObjectConverter.java | 2 +- .../aggr/BinaryColumnStatsAggregator.java | 2 +- .../aggr/BooleanColumnStatsAggregator.java | 2 +- .../aggr/ColumnStatsAggregator.java | 2 +- .../aggr/ColumnStatsAggregatorFactory.java | 2 +- .../aggr/DateColumnStatsAggregator.java | 10 +- .../aggr/DecimalColumnStatsAggregator.java | 12 +- .../aggr/DoubleColumnStatsAggregator.java | 10 +- .../aggr/IExtrapolatePartStatus.java | 4 +- .../aggr/LongColumnStatsAggregator.java | 10 +- .../aggr/StringColumnStatsAggregator.java | 10 +- .../hive/metastore/utils/MetaStoreUtils.java | 152 +++++++++++++++++- .../hadoop/hive/metastore/TestDeadline.java | 2 +- 17 files changed, 199 insertions(+), 74 deletions(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/Deadline.java (98%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/DeadlineException.java (100%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java (98%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java (98%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java (98%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java (95%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java (98%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java (98%) rename {metastore/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/metastore/TestDeadline.java (99%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index a147a2590d6d..49e28fd7ca61 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1818,29 +1818,6 @@ public static List getColumnNames(List schema) { return cols; } - // given a list of partStats, this function will give you an aggr stats - public static List 
aggrPartitionStats(List partStats, - String dbName, String tableName, List partNames, List colNames, - boolean useDensityFunctionForNDVEstimation, double ndvTuner) - throws MetaException { - // 1. group by the stats by colNames - // map the colName to List - Map> map = new HashMap<>(); - for (ColumnStatistics css : partStats) { - List objs = css.getStatsObj(); - for (ColumnStatisticsObj obj : objs) { - List singleObj = new ArrayList<>(); - singleObj.add(obj); - ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj); - if (!map.containsKey(obj.getColName())) { - map.put(obj.getColName(), new ArrayList()); - } - map.get(obj.getColName()).add(singleCS); - } - } - return aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); - } - public static List aggrPartitionStats( Map> map, String dbName, String tableName, final List partNames, List colNames, @@ -1975,16 +1952,12 @@ public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest m return md.digest(); } - public static double decimalToDouble(Decimal decimal) { - return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); - } - /** * Verify if the user is allowed to make DB notification related calls. * Only the superusers defined in the Hadoop proxy user settings have the permission. * * @param user the short user name - * @param config that contains the proxy user settings + * @param conf that contains the proxy user settings * @return if the user has the permission */ public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java index 99bd7b06fdf3..2e000054524d 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -100,7 +101,7 @@ public static void resetTimeout(long timeoutMs) throws MetaException { /** * start the timer before a method is invoked. 
- * @param method + * @param method method to be invoked */ public static boolean startTimer(String method) throws MetaException { Deadline deadline = getCurrentDeadline(); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/DeadlineException.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DeadlineException.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/DeadlineException.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DeadlineException.java diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index d642622bfc56..36fb50d57557 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,6 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -66,6 +64,8 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MDatabase; import org.apache.hadoop.hive.metastore.model.MNotificationLog; @@ -80,6 +80,7 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hive.common.util.BloomFilter; import org.datanucleus.store.rdbms.query.ForwardQueryResult; import org.slf4j.Logger; @@ -146,7 +147,7 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche dbType = DatabaseProduct.OTHER; } this.dbType = dbType; - int batchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE); + int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_PARTITION_BATCH_SIZE); if (batchSize == DETECT_BATCHING) { batchSize = DatabaseProduct.needsInBatching(dbType) ? 
1000 : NO_BATCHING; } @@ -162,10 +163,10 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche } convertMapNullsToEmptyStrings = - HiveConf.getBoolVar(conf, ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS); - defaultPartName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + MetastoreConf.getBoolVar(conf, ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS); + defaultPartName = MetastoreConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); - String jdoIdFactory = HiveConf.getVar(conf, ConfVars.METASTORE_IDENTIFIER_FACTORY); + String jdoIdFactory = MetastoreConf.getVar(conf, ConfVars.IDENTIFIER_FACTORY); if (! ("datanucleus1".equalsIgnoreCase(jdoIdFactory))){ LOG.warn("Underlying metastore does not use 'datanucleus1' for its ORM naming scheme." + " Disabling directSQL as it uses hand-hardcoded SQL with that assumption."); @@ -177,8 +178,8 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche } } - isAggregateStatsCacheEnabled = HiveConf.getBoolVar( - conf, ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED); + isAggregateStatsCacheEnabled = MetastoreConf.getBoolVar( + conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED); if (isAggregateStatsCacheEnabled) { aggrStatsCache = AggregateStatsCache.getInstance(conf); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 7c8054bee651..08ea67fc2f68 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java index e6c836b18388..45d5d8c984cd 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java index a34bc9f38be2..8aac0fe33df8 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java index a52e5e5275ce..cd0392d6c0dc 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java index dfae70828cb3..7aaab4a6b978 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java index ee953966c72d..7f2956152cfc 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -145,12 +145,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; @@ -261,7 +261,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDateStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java index 284c12cc2b38..05c028026299 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; @@ -36,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -155,12 +155,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, } else { // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; @@ -270,7 +270,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDecimalStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java index bb4a725d44c0..faf22dcd7c51 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -142,12 +142,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, } else { // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. 
double densityAvgSum = 0.0; @@ -245,7 +245,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDoubleStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java similarity index 95% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java index acf679e1c30a..98a121be3856 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public interface IExtrapolatePartStatus { * the average of ndv density, which is useful when * useDensityFunctionForNDVEstimation is true. */ - public abstract void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + void extrapolate(ColumnStatisticsData extrapolateData, int numParts, int numPartsWithStats, Map adjustedIndexMap, Map adjustedStatsMap, double densityAvg); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java index 5b1145e50767..d12cdc08ea89 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -143,12 +143,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. 
double densityAvgSum = 0.0; @@ -246,7 +246,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getLongStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java index 1b29f92d4a9e..4539e6b026bf 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -125,12 +125,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); if (ndvEstimator == null) { // if not every partition uses bitvector for ndv, we just fall back to // the traditional extrapolation methods. 
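For orientation, the aggregator hunks above all build an indexMap from partition name to its ordinal position in partNames, so that when only some partitions carry statistics the aggregator can extrapolate bounds over the full partition range. A toy sketch of just that indexing step is below; it mirrors the loop visible in the hunks, and the extrapolation math itself is intentionally omitted.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class IndexMapSketch {
      // Map each partition name to its ordinal position, as the aggregators do
      // before extrapolating column statistics onto partitions without stats.
      public static Map<String, Integer> buildIndexMap(List<String> partNames) {
        Map<String, Integer> indexMap = new HashMap<>();
        for (int index = 0; index < partNames.size(); index++) {
          indexMap.put(partNames.get(index), index);
        }
        return indexMap;
      }
    }
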
@@ -217,7 +217,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getStringStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the avgLen Collections.sort(list, new Comparator>() { diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 3c2e1a1ba4bc..074c067dad92 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -17,12 +17,33 @@ */ package org.apache.hadoop.hive.metastore.utils; +import com.google.common.base.Predicates; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; +import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; +import java.math.BigDecimal; +import java.math.BigInteger; import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; public class MetaStoreUtils { /** A fixed date format to be used for hive partition column values. */ @@ -33,7 +54,7 @@ protected DateFormat initialValue() { DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. return val; - }; + } }; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); @@ -66,4 +87,133 @@ public static String encodeTableName(String name) { } return sb.toString(); } + + /** + * convert Exception to MetaException, which sets the cause to such exception + * @param e cause of the exception + * @return the MetaException with the specified exception as the cause + */ + public static MetaException newMetaException(Exception e) { + return newMetaException(e != null ? e.getMessage() : null, e); + } + + /** + * convert Exception to MetaException, which sets the cause to such exception + * @param errorMessage the error message for this MetaException + * @param e cause of the exception + * @return the MetaException with the specified exception as the cause + */ + public static MetaException newMetaException(String errorMessage, Exception e) { + MetaException metaException = new MetaException(errorMessage); + if (e != null) { + metaException.initCause(e); + } + return metaException; + } + + /** + * Helper function to transform Nulls to empty strings. 
+ */ + private static final com.google.common.base.Function transFormNullsToEmptyString + = new com.google.common.base.Function() { + @Override + public java.lang.String apply(@Nullable java.lang.String string) { + return org.apache.commons.lang.StringUtils.defaultString(string); + } + }; + /** + * We have aneed to sanity-check the map before conversion from persisted objects to + * metadata thrift objects because null values in maps will cause a NPE if we send + * across thrift. Pruning is appropriate for most cases except for databases such as + * Oracle where Empty strings are stored as nulls, in which case we need to handle that. + * See HIVE-8485 for motivations for this. + */ + public static Map trimMapNulls( + Map dnMap, boolean retrieveMapNullsAsEmptyStrings){ + if (dnMap == null){ + return null; + } + // Must be deterministic order map - see HIVE-8707 + // => we use Maps.newLinkedHashMap instead of Maps.newHashMap + if (retrieveMapNullsAsEmptyStrings) { + // convert any nulls present in map values to empty strings - this is done in the case + // of backing dbs like oracle which persist empty strings as nulls. + return Maps.newLinkedHashMap(Maps.transformValues(dnMap, transFormNullsToEmptyString)); + } else { + // prune any nulls present in map values - this is the typical case. + return Maps.newLinkedHashMap(Maps.filterValues(dnMap, Predicates.notNull())); + } + } + + + // given a list of partStats, this function will give you an aggr stats + public static List aggrPartitionStats(List partStats, + String dbName, String tableName, List partNames, List colNames, + boolean useDensityFunctionForNDVEstimation, double ndvTuner) + throws MetaException { + // 1. group by the stats by colNames + // map the colName to List + Map> map = new HashMap<>(); + for (ColumnStatistics css : partStats) { + List objs = css.getStatsObj(); + for (ColumnStatisticsObj obj : objs) { + List singleObj = new ArrayList<>(); + singleObj.add(obj); + ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj); + if (!map.containsKey(obj.getColName())) { + map.put(obj.getColName(), new ArrayList()); + } + map.get(obj.getColName()).add(singleCS); + } + } + return MetaStoreUtils.aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); + } + + public static List aggrPartitionStats( + Map> map, String dbName, String tableName, + final List partNames, List colNames, + final boolean useDensityFunctionForNDVEstimation,final double ndvTuner) throws MetaException { + List colStats = new ArrayList<>(); + // 2. 
Aggregate stats for each column in a separate thread + if (map.size()< 1) { + //stats are absent in RDBMS + LOG.debug("No stats data found for: dbName=" +dbName +" tblName=" + tableName + + " partNames= " + partNames + " colNames=" + colNames ); + return colStats; + } + final ExecutorService pool = Executors.newFixedThreadPool(Math.min(map.size(), 16), + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("aggr-col-stats-%d").build()); + final List> futures = Lists.newLinkedList(); + + long start = System.currentTimeMillis(); + for (final Map.Entry> entry : map.entrySet()) { + futures.add(pool.submit(new Callable() { + @Override + public ColumnStatisticsObj call() throws Exception { + List css = entry.getValue(); + ColumnStatsAggregator aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css + .iterator().next().getStatsObj().iterator().next().getStatsData().getSetField(), + useDensityFunctionForNDVEstimation, ndvTuner); + ColumnStatisticsObj statsObj = aggregator.aggregate(entry.getKey(), partNames, css); + return statsObj; + }})); + } + pool.shutdown(); + for (Future future : futures) { + try { + colStats.add(future.get()); + } catch (InterruptedException | ExecutionException e) { + pool.shutdownNow(); + LOG.debug(e.toString()); + throw new MetaException(e.toString()); + } + } + LOG.debug("Time for aggr col stats in seconds: {} Threads used: {}", + ((System.currentTimeMillis() - (double)start))/1000, Math.min(map.size(), 16)); + return colStats; + } + + public static double decimalToDouble(Decimal decimal) { + return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java index ea11ed56b4cf..3a8443a8c083 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information From 06574e9a944639e878e1c0205f4a22ebd09bd813 Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Fri, 25 Aug 2017 16:37:33 -0700 Subject: [PATCH 05/13] Moved PartFilterExprUtil --- .../hive/metastore/PartFilterExprUtil.java | 19 +++++++---- .../hive/metastore/utils/JavaUtils.java | 33 +++++++++++++++++++ 2 files changed, 46 insertions(+), 6 deletions(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java (86%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java similarity index 86% rename from metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java index 41d7e817480a..b94063e899da 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,12 @@ import org.antlr.runtime.CommonTokenStream; import org.antlr.runtime.RecognitionException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.FilterLexer; @@ -47,7 +49,12 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express try { filter = expressionProxy.convertExprToFilter(expr); } catch (MetaException ex) { - throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); + // TODO - for now we have construct this by reflection because IMetaStoreClient can't be + // moved until after HiveMetaStore is moved, which can't be moved until this is moved. + Class exClass = JavaUtils.getClass( + "org.apache.hadoop.hive.metastore.IMetaStoreClient.IncompatibleMetastoreException", + MetaException.class); + throw JavaUtils.newInstance(exClass, new Class[]{String.class}, new Object[]{ex.getMessage()}); } // Make a tree out of the filter. @@ -68,12 +75,12 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express * @return The partition expression proxy. 
*/ public static PartitionExpressionProxy createExpressionProxy(Configuration conf) { - String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS); try { @SuppressWarnings("unchecked") Class clazz = - (Class)MetaStoreUtils.getClass(className); - return MetaStoreUtils.newInstance( + JavaUtils.getClass(className, PartitionExpressionProxy.class); + return JavaUtils.newInstance( clazz, new Class[0], new Object[0]); } catch (MetaException e) { LOG.error("Error loading PartitionExpressionProxy", e); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java index 40f739301eec..593dee3996f3 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java @@ -21,6 +21,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.lang.reflect.Constructor; import java.net.InetAddress; import java.net.UnknownHostException; @@ -54,6 +55,38 @@ public static Class getClass(String className, Class clazz) } } + /** + * Create an object of the given class. + * @param theClass + * @param parameterTypes + * an array of parameterTypes for the constructor + * @param initargs + * the list of arguments for the constructor + */ + public static T newInstance(Class theClass, Class[] parameterTypes, + Object[] initargs) { + // Perform some sanity checks on the arguments. + if (parameterTypes.length != initargs.length) { + throw new IllegalArgumentException( + "Number of constructor parameter types doesn't match number of arguments"); + } + for (int i = 0; i < parameterTypes.length; i++) { + Class clazz = parameterTypes[i]; + if (initargs[i] != null && !(clazz.isInstance(initargs[i]))) { + throw new IllegalArgumentException("Object : " + initargs[i] + + " is not an instance of " + clazz); + } + } + + try { + Constructor meth = theClass.getDeclaredConstructor(parameterTypes); + meth.setAccessible(true); + return meth.newInstance(initargs); + } catch (Exception e) { + throw new RuntimeException("Unable to instantiate " + theClass.getName(), e); + } + } + /** * @return name of current host */ From ef6d8ed5cfbb1ca3af3a86fbf4a23e283da466df Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Wed, 30 Aug 2017 17:00:05 -0700 Subject: [PATCH 06/13] Moved stats data inspectors --- .../columnstats/cache/DateColumnStatsDataInspector.java | 0 .../columnstats/cache/DecimalColumnStatsDataInspector.java | 0 .../columnstats/cache/DoubleColumnStatsDataInspector.java | 0 .../columnstats/cache/LongColumnStatsDataInspector.java | 0 .../columnstats/cache/StringColumnStatsDataInspector.java | 0 .../hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java | 1 - 6 files changed, 1 deletion(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java (100%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java (100%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java (100%) rename {metastore/src => 
standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java (100%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java (100%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java index 28f074de8c75..e3a6f140b9fc 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java @@ -24,7 +24,6 @@ import javolution.util.FastBitSet; -import org.apache.hadoop.hive.common.ndv.FMSketch; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; import org.junit.Test; From 
cd0e92da5f3097ccbcd4d6b54c20e924a4a91b4f Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Fri, 25 Aug 2017 18:36:15 -0700 Subject: [PATCH 07/13] Moved RawStore and friends. --- .../hadoop/hive/metastore/MetaStoreUtils.java | 153 ---- .../ql/io/orc/TestOrcSplitElimination.java | 2 +- standalone-metastore/pom.xml | 11 +- .../hadoop/hive/common/StatsSetupConst.java | 15 +- .../hive/metastore/FileMetadataHandler.java | 3 +- .../hadoop/hive/metastore}/MetadataStore.java | 4 +- .../hadoop/hive/metastore/ObjectStore.java | 693 +++++++++--------- .../hive/metastore/PartFilterExprUtil.java | 3 +- .../hadoop/hive/metastore/RawStore.java | 230 +++--- .../metastore/cache/ByteArrayWrapper.java | 2 +- .../hive/metastore/cache/CacheUtils.java | 25 +- .../hive/metastore/cache/CachedStore.java | 106 ++- .../hive/metastore/cache/SharedCache.java | 42 +- .../hive/metastore/conf/MetastoreConf.java | 2 + .../hive/metastore/utils/MetaStoreUtils.java | 143 ++++ .../hive/metastore/utils/ObjectPair.java | 86 +++ .../hive/common/TestStatsSetupConst.java | 2 +- .../hive/metastore/TestObjectStore.java | 46 +- .../hive/metastore/cache/TestCachedStore.java | 161 ++-- 19 files changed, 901 insertions(+), 828 deletions(-) rename {common/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/common/StatsSetupConst.java (97%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java (98%) rename {metastore/src/java/org/apache/hadoop/hive/metastore/hbase => standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore}/MetadataStore.java (97%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/ObjectStore.java (93%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/RawStore.java (72%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java (99%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java (85%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java (95%) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java (93%) create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java rename {common/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/common/TestStatsSetupConst.java (99%) rename {metastore/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/metastore/TestObjectStore.java (94%) rename {metastore/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java (87%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 49e28fd7ca61..5343ed557ffa 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -126,7 +126,6 @@ public class MetaStoreUtils { // configuration parameter documentation // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. 
public static final char[] specialCharactersInTableNames = new char[] { '/' }; - final static Charset ENCODING = StandardCharsets.UTF_8; public static Table createColumnsetSchema(String name, List columns, List partCols, Configuration conf) throws MetaException { @@ -1180,15 +1179,6 @@ public static Properties getSchema( return addCols(getSchemaWithoutCols(sd, tblsd, parameters, databaseName, tableName, partitionKeys), tblsd.getCols()); } - public static List getColumnNamesForTable(Table table) { - List colNames = new ArrayList(); - Iterator colsIterator = table.getSd().getColsIterator(); - while (colsIterator.hasNext()) { - colNames.add(colsIterator.next().getName()); - } - return colNames; - } - public static String getColumnNameDelimiter(List fieldSchemas) { // we first take a look if any fieldSchemas contain COMMA for (int i = 0; i < fieldSchemas.size(); i++) { @@ -1787,15 +1777,6 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) csNew.setStatsObj(list); } - /** - * convert Exception to MetaException, which sets the cause to such exception - * @param e cause of the exception - * @return the MetaException with the specified exception as the cause - */ - public static MetaException newMetaException(Exception e) { - return newMetaException(e != null ? e.getMessage() : null, e); - } - /** * convert Exception to MetaException, which sets the cause to such exception * @param errorMessage the error message for this MetaException @@ -1818,140 +1799,6 @@ public static List getColumnNames(List schema) { return cols; } - public static List aggrPartitionStats( - Map> map, String dbName, String tableName, - final List partNames, List colNames, - final boolean useDensityFunctionForNDVEstimation,final double ndvTuner) throws MetaException { - List colStats = new ArrayList<>(); - // 2. 
Aggregate stats for each column in a separate thread - if (map.size()< 1) { - //stats are absent in RDBMS - LOG.debug("No stats data found for: dbName=" +dbName +" tblName=" + tableName + - " partNames= " + partNames + " colNames=" + colNames ); - return colStats; - } - final ExecutorService pool = Executors.newFixedThreadPool(Math.min(map.size(), 16), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("aggr-col-stats-%d").build()); - final List> futures = Lists.newLinkedList(); - - long start = System.currentTimeMillis(); - for (final Entry> entry : map.entrySet()) { - futures.add(pool.submit(new Callable() { - @Override - public ColumnStatisticsObj call() throws Exception { - List css = entry.getValue(); - ColumnStatsAggregator aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css - .iterator().next().getStatsObj().iterator().next().getStatsData().getSetField(), - useDensityFunctionForNDVEstimation, ndvTuner); - ColumnStatisticsObj statsObj = aggregator.aggregate(entry.getKey(), partNames, css); - return statsObj; - }})); - } - pool.shutdown(); - for (Future future : futures) { - try { - colStats.add(future.get()); - } catch (InterruptedException | ExecutionException e) { - pool.shutdownNow(); - LOG.debug(e.toString()); - throw new MetaException(e.toString()); - } - } - LOG.debug("Time for aggr col stats in seconds: {} Threads used: {}", - ((System.currentTimeMillis() - (double)start))/1000, Math.min(map.size(), 16)); - return colStats; - } - - - /** - * Produce a hash for the storage descriptor - * @param sd storage descriptor to hash - * @param md message descriptor to use to generate the hash - * @return the hash as a byte array - */ - public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { - // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different - // results for hashes based on the OS or JVM being used. - md.reset(); - for (FieldSchema fs : sd.getCols()) { - md.update(fs.getName().getBytes(ENCODING)); - md.update(fs.getType().getBytes(ENCODING)); - if (fs.getComment() != null) { - md.update(fs.getComment().getBytes(ENCODING)); - } - } - if (sd.getInputFormat() != null) { - md.update(sd.getInputFormat().getBytes(ENCODING)); - } - if (sd.getOutputFormat() != null) { - md.update(sd.getOutputFormat().getBytes(ENCODING)); - } - md.update(sd.isCompressed() ? 
"true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); - if (sd.getSerdeInfo() != null) { - SerDeInfo serde = sd.getSerdeInfo(); - if (serde.getName() != null) { - md.update(serde.getName().getBytes(ENCODING)); - } - if (serde.getSerializationLib() != null) { - md.update(serde.getSerializationLib().getBytes(ENCODING)); - } - if (serde.getParameters() != null) { - SortedMap params = new TreeMap<>(serde.getParameters()); - for (Entry param : params.entrySet()) { - md.update(param.getKey().getBytes(ENCODING)); - md.update(param.getValue().getBytes(ENCODING)); - } - } - } - if (sd.getBucketCols() != null) { - List bucketCols = new ArrayList<>(sd.getBucketCols()); - for (String bucket : bucketCols) { - md.update(bucket.getBytes(ENCODING)); - } - } - if (sd.getSortCols() != null) { - SortedSet orders = new TreeSet<>(sd.getSortCols()); - for (Order order : orders) { - md.update(order.getCol().getBytes(ENCODING)); - md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); - } - } - if (sd.getSkewedInfo() != null) { - SkewedInfo skewed = sd.getSkewedInfo(); - if (skewed.getSkewedColNames() != null) { - SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); - for (String colname : colnames) { - md.update(colname.getBytes(ENCODING)); - } - } - if (skewed.getSkewedColValues() != null) { - SortedSet sortedOuterList = new TreeSet<>(); - for (List innerList : skewed.getSkewedColValues()) { - SortedSet sortedInnerList = new TreeSet<>(innerList); - sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); - } - for (String colval : sortedOuterList) { - md.update(colval.getBytes(ENCODING)); - } - } - if (skewed.getSkewedColValueLocationMaps() != null) { - SortedMap sortedMap = new TreeMap<>(); - for (Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { - SortedSet sortedKey = new TreeSet<>(smap.getKey()); - sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); - } - for (Entry e : sortedMap.entrySet()) { - md.update(e.getKey().getBytes(ENCODING)); - md.update(e.getValue().getBytes(ENCODING)); - } - } - md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - } - - return md.digest(); - } - /** * Verify if the user is allowed to make DB notification related calls. * Only the superusers defined in the Hadoop proxy user settings have the permission. 
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java index b26401db180e..5c6bbbd210cf 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; -import org.apache.hadoop.hive.metastore.hbase.MetadataStore; +import org.apache.hadoop.hive.metastore.MetadataStore; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.io.orc.ExternalCache.ExternalFooterCachesByConf; import org.apache.hadoop.hive.ql.metadata.HiveException; diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index c222e8c5e49d..07b767bc138d 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -33,9 +33,9 @@ - com.jolbox - bonecp - ${bonecp.version} + com.fasterxml.jackson.core + jackson-databind + ${jackson.new.version} com.github.joshelser @@ -52,6 +52,11 @@ protobuf-java ${protobuf.version} + + com.jolbox + bonecp + ${bonecp.version} + com.zaxxer HikariCP diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java similarity index 97% rename from common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java index 7c27d07024e4..e2e3ada9e8fd 100644 --- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,10 +67,10 @@ public String getAggregator(Configuration conf) { custom { @Override public String getPublisher(Configuration conf) { - return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DEFAULT_PUBLISHER); } + return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_PUBLISHER); } @Override public String getAggregator(Configuration conf) { - return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DEFAULT_AGGREGATOR); } + return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_AGGREGATOR); } }; public abstract String getPublisher(Configuration conf); public abstract String getAggregator(Configuration conf); @@ -170,7 +171,7 @@ static class BooleanSerializer extends JsonSerializer { @Override public void serialize(Boolean value, JsonGenerator jsonGenerator, - SerializerProvider serializerProvider) throws IOException, JsonProcessingException { + SerializerProvider serializerProvider) throws IOException { jsonGenerator.writeString(value.toString()); } } @@ -179,7 +180,7 @@ static class BooleanDeserializer extends JsonDeserializer { public Boolean deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) - throws IOException, JsonProcessingException { + throws IOException { return Boolean.valueOf(jsonParser.getValueAsString()); } } @@ -196,7 +197,7 @@ public Boolean deserialize(JsonParser jsonParser, @JsonDeserialize(contentUsing = BooleanDeserializer.class) TreeMap columnStats = new TreeMap<>(); - }; + } public static boolean areBasicStatsUptoDate(Map params) { if (params == null) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java index 832daec736e1..4c14ab0a11ba 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.hbase.MetadataStore; /** * The base implementation of a file metadata handler for a specific file type. 
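The StatsSetupConst and FileMetadataHandler hunks above show the conversion applied throughout this series: configuration reads move from HiveConf.getVar/getBoolVar with METASTORE_-prefixed keys to the MetastoreConf equivalents with shortened ConfVars names. A small sketch of the resulting lookup pattern is below; the class and method here are hypothetical wrappers, but the MetastoreConf.getVar call and EXPRESSION_PROXY_CLASS key are taken directly from the hunks in this series.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class ConfLookupSketch {
      // Old style on the ql/common side:
      //   HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS)
      // New style used by the standalone metastore:
      public static String expressionProxyClass(Configuration conf) {
        return MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
      }
    }
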
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java similarity index 97% rename from metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java index d427fef71d51..26e2c499ad14 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.metastore.hbase; +package org.apache.hadoop.hive.metastore; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 2f27c3c4332c..2f05388a8220 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; import java.io.IOException; import java.lang.reflect.Field; @@ -65,16 +66,12 @@ import com.codahale.metrics.MetricRegistry; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -129,6 +126,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; @@ -166,10 
+164,11 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.hadoop.util.StringUtils; -import org.apache.hive.common.util.HiveStringUtils; import org.apache.thrift.TException; import org.datanucleus.AbstractNucleusContext; import org.datanucleus.ClassLoaderResolver; @@ -207,7 +206,7 @@ public class ObjectStore implements RawStore, Configurable { private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false); private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class.getName()); - private static enum TXN_STATUS { + private enum TXN_STATUS { NO_STATE, OPEN, COMMITED, ROLLBACK } @@ -216,7 +215,7 @@ private static enum TXN_STATUS { private static final String USER; private static final String JDO_PARAM = ":param"; static { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put("table", MTable.class); map.put("storagedescriptor", MStorageDescriptor.class); map.put("serdeinfo", MSerDeInfo.class); @@ -247,7 +246,7 @@ private static enum TXN_STATUS { private SQLGenerator sqlGenerator = null; private MetaStoreDirectSql directSql = null; private PartitionExpressionProxy expressionProxy = null; - private Configuration hiveConf; + private Configuration conf; private volatile int openTrasactionCalls = 0; private Transaction currentTransaction = null; private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE; @@ -278,7 +277,7 @@ public ObjectStore() { @Override public Configuration getConf() { - return hiveConf; + return conf; } /** @@ -295,7 +294,7 @@ public void setConf(Configuration conf) { pmfPropLock.lock(); try { isInitialized = false; - hiveConf = conf; + this.conf = conf; configureSSL(conf); Properties propsFromConf = getDataSourceProps(conf); boolean propsChanged = !propsFromConf.equals(prop); @@ -326,7 +325,7 @@ public void setConf(Configuration conf) { initialize(propsFromConf); String partitionValidationRegex = - hiveConf.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name()); + MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN); if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) { partitionValidationPattern = Pattern.compile(partitionValidationRegex); } else { @@ -361,10 +360,9 @@ public void setConf(Configuration conf) { @SuppressWarnings("nls") private void initialize(Properties dsProps) { - int retryLimit = HiveConf.getIntVar(hiveConf, - HiveConf.ConfVars.HMSHANDLERATTEMPTS); - long retryInterval = HiveConf.getTimeVar(hiveConf, - HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS); + int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMSHANDLERATTEMPTS); + long retryInterval = MetastoreConf.getTimeVar(conf, + ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS); int numTries = retryLimit; while (numTries > 0){ @@ -400,7 +398,7 @@ private void initialize(Properties dsProps) { } private static final Set> retriableExceptionClasses = - new HashSet>(Arrays.asList(JDOCanRetryException.class)); + new 
HashSet<>(Arrays.asList(JDOCanRetryException.class)); /** * Helper function for initialize to determine if we should retry an exception. * We return true if the exception is of a known type of retriable exceptions, or if one @@ -444,13 +442,13 @@ private void initializeHelper(Properties dsProps) { } isInitialized = pm != null; if (isInitialized) { - expressionProxy = createExpressionProxy(hiveConf); - if (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)) { + expressionProxy = createExpressionProxy(conf); + if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) { String schema = prop.getProperty("javax.jdo.mapping.Schema"); if (schema != null && schema.isEmpty()) { schema = null; } - directSql = new MetaStoreDirectSql(pm, hiveConf, schema); + directSql = new MetaStoreDirectSql(pm, conf, schema); } } LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + @@ -465,13 +463,12 @@ private void initializeHelper(Properties dsProps) { * @return The partition expression proxy. */ private static PartitionExpressionProxy createExpressionProxy(Configuration conf) { - String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS); try { @SuppressWarnings("unchecked") Class clazz = - (Class)MetaStoreUtils.getClass(className); - return MetaStoreUtils.newInstance( - clazz, new Class[0], new Object[0]); + JavaUtils.getClass(className, PartitionExpressionProxy.class); + return JavaUtils.newInstance(clazz, new Class[0], new Object[0]); } catch (MetaException e) { LOG.error("Error loading PartitionExpressionProxy", e); throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); @@ -484,7 +481,7 @@ private static PartitionExpressionProxy createExpressionProxy(Configuration conf */ private static void configureSSL(Configuration conf) { // SSL support - String sslPropString = conf.get(HiveConf.ConfVars.METASTORE_DBACCESS_SSL_PROPS.varname); + String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS); if (org.apache.commons.lang.StringUtils.isNotEmpty(sslPropString)) { LOG.info("Metastore setting SSL properties of the connection to backed DB"); for (String sslProp : sslPropString.split(",")) { @@ -492,7 +489,7 @@ private static void configureSSL(Configuration conf) { if (pair != null && pair.length == 2) { System.setProperty(pair[0].trim(), pair[1].trim()); } else { - LOG.warn("Invalid metastore property value for " + HiveConf.ConfVars.METASTORE_DBACCESS_SSL_PROPS); + LOG.warn("Invalid metastore property value for " + ConfVars.DBACCESS_SSL_PROPS); } } } @@ -507,23 +504,20 @@ private static Properties getDataSourceProps(Configuration conf) { Properties prop = new Properties(); correctAutoStartMechanism(conf); - Iterator> iter = conf.iterator(); - while (iter.hasNext()) { - Map.Entry e = iter.next(); - if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) { - Object prevVal = prop.setProperty(e.getKey(), conf.get(e.getKey())); - if (LOG.isDebugEnabled() - && !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { - LOG.debug("Overriding " + e.getKey() + " value " + prevVal - + " from jpox.properties with " + e.getValue()); - } + for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) { + String confVal = MetastoreConf.getAsString(conf, var); + Object prevVal = prop.setProperty(var.varname, confVal); + if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(var.varname)) { + LOG.debug("Overriding 
" + var.varname + " value " + prevVal + + " from jpox.properties with " + confVal); } } // Password may no longer be in the conf, use getPassword() try { String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); if (passwd != null && !passwd.isEmpty()) { - prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd); + // We can get away with the use of varname here because varname == hiveName for PWD + prop.setProperty(ConfVars.PWD.varname, passwd); } } catch (IOException err) { throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err); @@ -531,7 +525,7 @@ private static Properties getDataSourceProps(Configuration conf) { if (LOG.isDebugEnabled()) { for (Entry e : prop.entrySet()) { - if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { + if (MetastoreConf.isPrintable(e.getKey().toString())) { LOG.debug(e.getKey() + " = " + e.getValue()); } } @@ -561,7 +555,7 @@ private static void correctAutoStartMechanism(Configuration conf) { private static synchronized PersistenceManagerFactory getPMF() { if (pmf == null) { - HiveConf conf = new HiveConf(ObjectStore.class); + Configuration conf = MetastoreConf.newMetastoreConf(); DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); if (dsp == null) { pmf = JDOHelper.getPersistenceManagerFactory(prop); @@ -583,7 +577,7 @@ private static synchronized PersistenceManagerFactory getPMF() { } DataStoreCache dsc = pmf.getDataStoreCache(); if (dsc != null) { - String objTypes = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES); + String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES); LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"" + objTypes + "\""); if (objTypes != null && objTypes.length() > 0) { objTypes = objTypes.toLowerCase(); @@ -752,7 +746,7 @@ private MDatabase getMDatabase(String name) throws NoSuchObjectException { Query query = null; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); + name = normalizeIdentifier(name); query = pm.newQuery(MDatabase.class, "name == dbname"); query.declareParameters("java.lang.String dbname"); query.setUnique(true); @@ -868,7 +862,7 @@ public boolean alterDatabase(String dbName, Database db) public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { boolean success = false; LOG.info("Dropping database " + dbname + " along with all tables"); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + dbname = normalizeIdentifier(dbname); QueryWrapper queryWrapper = new QueryWrapper(); try { openTransaction(); @@ -910,7 +904,7 @@ public List getDatabases(String pattern) throws MetaException { query.setResult("name"); query.setOrdering("name ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - databases = new ArrayList(); + databases = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { databases.add((String) i.next()); } @@ -933,7 +927,7 @@ public List getAllDatabases() throws MetaException { try { query = pm.newQuery(queryStr); query.setResult("name"); - databases = new ArrayList((Collection) query.execute()); + databases = new ArrayList<>((Collection) query.execute()); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -943,7 +937,7 @@ public List getAllDatabases() throws MetaException { } private MType getMType(Type type) { - List fields = new ArrayList(); + List 
fields = new ArrayList<>(); if (type.getFields() != null) { for (FieldSchema field : type.getFields()) { fields.add(new MFieldSchema(field.getName(), field.getType(), field @@ -954,7 +948,7 @@ private MType getMType(Type type) { } private Type getType(MType mtype) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); if (mtype.getFields() != null) { for (MFieldSchema field : mtype.getFields()) { fields.add(new FieldSchema(field.getName(), field.getType(), field @@ -1068,7 +1062,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException pm.makePersistent(mtbl); PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); - List toPersistPrivObjs = new ArrayList(); + List toPersistPrivObjs = new ArrayList<>(); if (principalPrivs != null) { int now = (int)(System.currentTimeMillis()/1000); @@ -1182,11 +1176,11 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, private List listAllTableConstraintsWithOptionalConstraintName (String dbName, String tableName, String constraintname) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - constraintname = constraintname!=null?HiveStringUtils.normalizeIdentifier(constraintname):null; + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + constraintname = constraintname!=null?normalizeIdentifier(constraintname):null; List mConstraints = null; - List constraintNames = new ArrayList(); + List constraintNames = new ArrayList<>(); Query query = null; try { @@ -1212,7 +1206,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, query.setFilter("param.contains(constraintName)"); query.declareParameters("java.util.Collection param"); Collection constraints = (Collection)query.execute(constraintNames); - mConstraints = new ArrayList(); + mConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currConstraint = (MConstraint) i.next(); mConstraints.add(currConstraint); @@ -1253,7 +1247,7 @@ public List getTables(String dbName, String pattern, TableType tableType List tbls = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); // Take the pattern and split it on the | to get all the composing // patterns List parameterVals = new ArrayList<>(); @@ -1271,7 +1265,7 @@ public List getTables(String dbName, String pattern, TableType tableType query.setResult("tableName"); query.setOrdering("tableName ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - tbls = new ArrayList(); + tbls = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { tbls.add((String) i.next()); } @@ -1320,7 +1314,7 @@ public List getTableMeta(String dbNames, String tableNames, List metas = new ArrayList(); + List metas = new ArrayList<>(); try { openTransaction(); // Take the pattern and split it on the | to get all the composing @@ -1359,7 +1353,7 @@ private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String private StringBuilder appendPatternCondition(StringBuilder builder, String fieldName, String elements, List parameters) { - elements = HiveStringUtils.normalizeIdentifier(elements); + elements = normalizeIdentifier(elements); return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters); } @@ -1418,8 +1412,8 @@ private AttachedMTableInfo 
getMTable(String db, String table, boolean retrieveCD Query query = null; try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); - table = HiveStringUtils.normalizeIdentifier(table); + db = normalizeIdentifier(db); + table = normalizeIdentifier(table); query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); query.declareParameters("java.lang.String table, java.lang.String db"); query.setUnique(true); @@ -1447,13 +1441,13 @@ private MTable getMTable(String db, String table) { @Override public List getTableObjectsByName(String db, List tbl_names) throws MetaException, UnknownDBException { - List
<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); boolean committed = false; Query dbExistsQuery = null; Query query = null; try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); + db = normalizeIdentifier(db); dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); dbExistsQuery.declareParameters("java.lang.String db"); dbExistsQuery.setUnique(true); @@ -1463,9 +1457,9 @@ public List
getTableObjectsByName(String db, List tbl_names) thro throw new UnknownDBException("Could not find database " + db); } - List lowered_tbl_names = new ArrayList(); + List lowered_tbl_names = new ArrayList<>(); for (String t : tbl_names) { - lowered_tbl_names.add(HiveStringUtils.normalizeIdentifier(t)); + lowered_tbl_names.add(normalizeIdentifier(t)); } query = pm.newQuery(MTable.class); query.setFilter("database.name == db && tbl_names.contains(tableName)"); @@ -1492,7 +1486,7 @@ private List convertList(List dnList) { /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */ private Map convertMap(Map dnMap) { return MetaStoreUtils.trimMapNulls(dnMap, - HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); + MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); } private Table convertToTable(MTable mtbl) throws MetaException { @@ -1549,7 +1543,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, } // A new table is always created with a new column descriptor - return new MTable(HiveStringUtils.normalizeIdentifier(tbl.getTableName()), mdb, + return new MTable(normalizeIdentifier(tbl.getTableName()), mdb, convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), @@ -1560,7 +1554,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, private List convertToMFieldSchemas(List keys) { List mkeys = null; if (keys != null) { - mkeys = new ArrayList(keys.size()); + mkeys = new ArrayList<>(keys.size()); for (FieldSchema part : keys) { mkeys.add(new MFieldSchema(part.getName().toLowerCase(), part.getType(), part.getComment())); @@ -1572,7 +1566,7 @@ private List convertToMFieldSchemas(List keys) { private List convertToFieldSchemas(List mkeys) { List keys = null; if (mkeys != null) { - keys = new ArrayList(mkeys.size()); + keys = new ArrayList<>(mkeys.size()); for (MFieldSchema part : mkeys) { keys.add(new FieldSchema(part.getName(), part.getType(), part .getComment())); @@ -1584,9 +1578,9 @@ private List convertToFieldSchemas(List mkeys) { private List convertToMOrders(List keys) { List mkeys = null; if (keys != null) { - mkeys = new ArrayList(keys.size()); + mkeys = new ArrayList<>(keys.size()); for (Order part : keys) { - mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder())); + mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder())); } } return mkeys; @@ -1595,7 +1589,7 @@ private List convertToMOrders(List keys) { private List convertToOrders(List mkeys) { List keys = null; if (mkeys != null) { - keys = new ArrayList(mkeys.size()); + keys = new ArrayList<>(mkeys.size()); for (MOrder part : mkeys) { keys.add(new Order(part.getCol(), part.getOrder())); } @@ -1667,9 +1661,9 @@ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) private List> convertToSkewedValues(List mLists) { List> lists = null; if (mLists != null) { - lists = new ArrayList>(mLists.size()); + lists = new ArrayList<>(mLists.size()); for (MStringList element : mLists) { - lists.add(new ArrayList(element.getInternalList())); + lists.add(new ArrayList<>(element.getInternalList())); } } return lists; @@ -1678,7 +1672,7 @@ private List> convertToSkewedValues(List mLists) { private List convertToMStringLists(List> mLists) { List lists = null ; if (null != mLists) { - lists = new 
ArrayList(); + lists = new ArrayList<>(); for (List mList : mLists) { lists.add(new MStringList(mList)); } @@ -1694,10 +1688,10 @@ private List convertToMStringLists(List> mLists) { private Map, String> covertToSkewedMap(Map mMap) { Map, String> map = null; if (mMap != null) { - map = new HashMap, String>(mMap.size()); + map = new HashMap<>(mMap.size()); Set keys = mMap.keySet(); for (MStringList key : keys) { - map.put(new ArrayList(key.getInternalList()), mMap.get(key)); + map.put(new ArrayList<>(key.getInternalList()), mMap.get(key)); } } return map; @@ -1711,7 +1705,7 @@ private Map, String> covertToSkewedMap(Map mMa private Map covertToMapMStringList(Map, String> mMap) { Map map = null; if (mMap != null) { - map = new HashMap(mMap.size()); + map = new HashMap<>(mMap.size()); Set> keys = mMap.keySet(); for (List key : keys) { map.put(new MStringList(key), mMap.get(key)); @@ -1776,7 +1770,7 @@ public boolean addPartitions(String dbName, String tblName, List part tabGrants = this.listAllTableGrants(dbName, tblName); tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName); } - List toPersist = new ArrayList(); + List toPersist = new ArrayList<>(); for (Partition part : parts) { if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { throw new MetaException("Partition does not belong to target table " @@ -1904,7 +1898,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException, pm.makePersistent(mpart); int now = (int)(System.currentTimeMillis()/1000); - List toPersist = new ArrayList(); + List toPersist = new ArrayList<>(); if (tabGrants != null) { for (MTablePrivilege tab: tabGrants) { MPartitionPrivilege partGrant = new MPartitionPrivilege(tab @@ -1961,8 +1955,8 @@ private MPartition getMPartition(String dbName, String tableName, List p Query query = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); MTable mtbl = getMTable(dbName, tableName); if (mtbl == null) { commited = commitTransaction(); @@ -2128,7 +2122,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio openTransaction(); if (part != null) { List schemas = part.getTable().getPartitionKeys(); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); for (MFieldSchema col: schemas) { colNames.add(col.getName()); } @@ -2211,7 +2205,7 @@ public List getPartitionsWithAuth(String dbName, String tblName, try { openTransaction(); List mparts = listMPartitions(dbName, tblName, max, queryWrapper); - List parts = new ArrayList(mparts.size()); + List parts = new ArrayList<>(mparts.size()); if (mparts != null && mparts.size()>0) { for (MPartition mpart : mparts) { MTable mtbl = mpart.getTable(); @@ -2277,7 +2271,7 @@ private List convertToParts(List src, List des return dest; } if (dest == null) { - dest = new ArrayList(src.size()); + dest = new ArrayList<>(src.size()); } for (MPartition mp : src) { dest.add(convertToPart(mp)); @@ -2288,7 +2282,7 @@ private List convertToParts(List src, List des private List convertToParts(String dbName, String tblName, List mparts) throws MetaException { - List parts = new ArrayList(mparts.size()); + List parts = new ArrayList<>(mparts.size()); for (MPartition mp : mparts) { parts.add(convertToPart(dbName, tblName, mp)); Deadline.checkTimeout(); @@ -2560,9 +2554,9 @@ private PartitionValuesResponse 
getDistinctValuesForPartitionsNoTxn(String dbNam } private List getPartitionNamesNoTxn(String dbName, String tableName, short max) { - List pns = new ArrayList(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + List pns = new ArrayList<>(); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Query query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + "where table.database.name == t1 && table.tableName == t2 " @@ -2601,8 +2595,8 @@ private List getPartitionNamesNoTxn(String dbName, String tableName, sho private Collection getPartitionPsQueryResults(String dbName, String tableName, List part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper) throws MetaException, NoSuchObjectException { - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Table table = getTable(dbName, tableName); if (table == null) { throw new NoSuchObjectException(dbName + "." + tableName + " table not found"); @@ -2646,7 +2640,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, public List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -2679,7 +2673,7 @@ public List listPartitionsPsWithAuth(String db_name, String tbl_name, @Override public List listPartitionNamesPs(String dbName, String tableName, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List partitionNames = new ArrayList(); + List partitionNames = new ArrayList<>(); boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -2705,8 +2699,8 @@ private List listMPartitions(String dbName, String tableName, int ma try { openTransaction(); LOG.debug("Executing listMPartitions"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); query.declareParameters("java.lang.String t1, java.lang.String t2"); query.setOrdering("partitionName ascending"); @@ -2773,7 +2767,7 @@ protected List getSqlResult(GetHelper> ctx) throws Me } } // We couldn't do SQL filter pushdown. Get names via normal means. - List partNames = new LinkedList(); + List partNames = new LinkedList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); @@ -2789,7 +2783,7 @@ protected List getJdoResult( } if (result == null) { // We couldn't do JDOQL filter pushdown. Get names via normal means. 
- List partNames = new ArrayList(); + List partNames = new ArrayList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); result = getPartitionsViaOrmFilter(dbName, tblName, partNames); @@ -2816,7 +2810,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, result.addAll(getPartitionNamesNoTxn( table.getDbName(), table.getTableName(), maxParts)); if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); } return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, result); } @@ -2833,7 +2827,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, */ private List getPartitionsViaOrmFilter(Table table, ExpressionTree tree, short maxParts, boolean isValidatedFilter) throws MetaException { - Map params = new HashMap(); + Map params = new HashMap<>(); String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { @@ -2861,7 +2855,7 @@ private List getPartitionsViaOrmFilter(Table table, ExpressionTree tr private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter) throws MetaException { - Map params = new HashMap(); + Map params = new HashMap<>(); String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; @@ -2890,7 +2884,7 @@ private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, b private List getPartitionsViaOrmFilter( String dbName, String tblName, List partNames) throws MetaException { if (partNames.isEmpty()) { - return new ArrayList(); + return new ArrayList<>(); } ObjectPair> queryWithParams = getPartQueryWithParams(dbName, tblName, partNames); @@ -2933,7 +2927,7 @@ private HashSet detachCdsFromSdsNoTxn( @SuppressWarnings("unchecked") List sds = (List)query.executeWithMap( queryWithParams.getSecond()); - HashSet candidateCds = new HashSet(); + HashSet candidateCds = new HashSet<>(); for (MStorageDescriptor sd : sds) { if (sd != null && sd.getCD() != null) { candidateCds.add(sd.getCD()); @@ -2950,7 +2944,7 @@ private ObjectPair> getPartQueryWithParams(String dbN String tblName, List partNames) { StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 && ("); int n = 0; - Map params = new HashMap(); + Map params = new HashMap<>(); for (Iterator itr = partNames.iterator(); itr.hasNext();) { String pn = "p" + n; n++; @@ -2964,10 +2958,10 @@ private ObjectPair> getPartQueryWithParams(String dbN Query query = pm.newQuery(); query.setFilter(sb.toString()); LOG.debug(" JDOQL filter is " + sb.toString()); - params.put("t1", HiveStringUtils.normalizeIdentifier(tblName)); - params.put("t2", HiveStringUtils.normalizeIdentifier(dbName)); + params.put("t1", normalizeIdentifier(tblName)); + params.put("t2", normalizeIdentifier(dbName)); query.declareParameters(makeParameterDeclarationString(params)); - return new ObjectPair>(query, params); + return new ObjectPair<>(query, params); } @Override @@ -2991,9 +2985,9 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; - this.dbName = 
HiveStringUtils.normalizeIdentifier(dbName); + this.dbName = normalizeIdentifier(dbName); if (tblName != null){ - this.tblName = HiveStringUtils.normalizeIdentifier(tblName); + this.tblName = normalizeIdentifier(tblName); } else { // tblName can be null in cases of Helper being used at a higher // abstraction level, such as with datbases @@ -3006,8 +3000,8 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ // SQL usage inside a larger transaction (e.g. droptable) may not be desirable because // some databases (e.g. Postgres) abort the entire transaction when any query fails, so // the fallback from failed SQL to JDO is not possible. - boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL) - && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); + boolean isConfigEnabled = MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL) + && (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL_DDL) || !isInTxn); if (isConfigEnabled && directSql == null) { directSql = new MetaStoreDirectSql(pm, getConf()); } @@ -3227,7 +3221,7 @@ protected String describeResult() { protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter); - }; + } @Override protected Integer getSqlResult(GetHelper ctx) throws MetaException { @@ -3279,7 +3273,7 @@ protected Integer getJdoResult( // if numPartitions could not be obtained from ORM filters, then get number partitions names, and count them if (numPartitions == null) { - List filteredPartNames = new ArrayList(); + List filteredPartNames = new ArrayList<>(); getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), tempExpr, "", (short) -1, filteredPartNames); numPartitions = filteredPartNames.size(); } @@ -3300,7 +3294,7 @@ protected List getPartitionsByFilterInternal(String dbName, String tb @Override protected boolean canUseDirectSql(GetHelper> ctx) throws MetaException { return directSql.generateSqlFilterForPushdown(ctx.getTable(), tree, filter); - }; + } @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { @@ -3414,12 +3408,12 @@ public List listTableNamesByFilter(String dbName, String filter, short m throws MetaException { boolean success = false; Query query = null; - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listTableNamesByFilter"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - Map params = new HashMap(); + dbName = normalizeIdentifier(dbName); + Map params = new HashMap<>(); String queryFilterString = makeQueryFilterString(dbName, null, filter, params); query = pm.newQuery(MTable.class); query.declareImports("import java.lang.String"); @@ -3438,11 +3432,11 @@ public List listTableNamesByFilter(String dbName, String filter, short m query.setFilter(queryFilterString); Collection names = (Collection)query.executeWithMap(params); // have to emulate "distinct", otherwise tables with the same name may be returned - Set tableNamesSet = new HashSet(); + Set tableNamesSet = new HashSet<>(); for (Iterator i = names.iterator(); i.hasNext();) { tableNamesSet.add((String) i.next()); } - tableNames = new ArrayList(tableNamesSet); + tableNames = new ArrayList<>(tableNamesSet); LOG.debug("Done executing query for listTableNamesByFilter"); success = commitTransaction(); LOG.debug("Done retrieving all objects for listTableNamesByFilter"); @@ -3457,19 +3451,19 @@ public 
List listPartitionNamesByFilter(String dbName, String tableName, short maxParts) throws MetaException { boolean success = false; Query query = null; - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); MTable mtable = getMTable(dbName, tableName); if (mtable == null) { // To be consistent with the behavior of listPartitionNames, if the // table or db does not exist, we return an empty list return partNames; } - Map params = new HashMap(); + Map params = new HashMap<>(); String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " @@ -3485,7 +3479,7 @@ public List listPartitionNamesByFilter(String dbName, String tableName, query.setOrdering("partitionName ascending"); query.setResult("partitionName"); Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList(); + partNames = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { partNames.add((String) i.next()); } @@ -3504,8 +3498,8 @@ public void alterTable(String dbname, String name, Table newTable) boolean success = false; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); MTable newt = convertToMTable(newTable); if (newt == null) { throw new InvalidObjectException("new table is invalid"); @@ -3518,7 +3512,7 @@ public void alterTable(String dbname, String name, Table newTable) // For now only alter name, owner, parameters, cols, bucketcols are allowed oldt.setDatabase(newt.getDatabase()); - oldt.setTableName(HiveStringUtils.normalizeIdentifier(newt.getTableName())); + oldt.setTableName(normalizeIdentifier(newt.getTableName())); oldt.setParameters(newt.getParameters()); oldt.setOwner(newt.getOwner()); // Fully copy over the contents of the new SD into the old SD, @@ -3547,9 +3541,9 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new boolean success = false; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); - baseTblName = HiveStringUtils.normalizeIdentifier(baseTblName); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + baseTblName = normalizeIdentifier(baseTblName); + dbname = normalizeIdentifier(dbname); MIndex newi = convertToMIndex(newIndex); if (newi == null) { throw new InvalidObjectException("new index is invalid"); @@ -3574,8 +3568,8 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new private void alterPartitionNoTxn(String dbname, String name, List part_vals, Partition newPart) throws InvalidObjectException, MetaException { - name = HiveStringUtils.normalizeIdentifier(name); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); MPartition oldp = getMPartition(dbname, name, part_vals); MPartition newp = convertToMPart(newPart, false); if (oldp == null || newp == null) { @@ -3780,7 +3774,7 @@ private boolean constraintNameAlreadyExists(String name) { String constraintNameIfExists = null; try { 
openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); + name = normalizeIdentifier(name); constraintExistsQuery = pm.newQuery(MConstraint.class, "constraintName == name"); constraintExistsQuery.declareParameters("java.lang.String name"); constraintExistsQuery.setUnique(true); @@ -3863,7 +3857,7 @@ private String getGuidFromDB() throws MetaException { query = pm.newQuery(MMetastoreDBProperties.class, "this.propertyKey == key"); query.declareParameters("java.lang.String key"); Collection names = (Collection) query.execute("guid"); - List uuids = new ArrayList(); + List uuids = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { String uuid = i.next().getPropertyValue(); LOG.debug("Found guid " + uuid); @@ -3887,17 +3881,17 @@ private String getGuidFromDB() throws MetaException { private List addForeignKeys( List fks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List fkNames = new ArrayList(); - List mpkfks = new ArrayList(); + List fkNames = new ArrayList<>(); + List mpkfks = new ArrayList<>(); String currentConstraintName = null; for (int i = 0; i < fks.size(); i++) { - final String pkTableDB = HiveStringUtils.normalizeIdentifier(fks.get(i).getPktable_db()); - final String pkTableName = HiveStringUtils.normalizeIdentifier(fks.get(i).getPktable_name()); - final String pkColumnName =HiveStringUtils.normalizeIdentifier(fks.get(i).getPkcolumn_name()); - final String fkTableDB = HiveStringUtils.normalizeIdentifier(fks.get(i).getFktable_db()); - final String fkTableName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFktable_name()); - final String fkColumnName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFkcolumn_name()); + final String pkTableDB = normalizeIdentifier(fks.get(i).getPktable_db()); + final String pkTableName = normalizeIdentifier(fks.get(i).getPktable_name()); + final String pkColumnName =normalizeIdentifier(fks.get(i).getPkcolumn_name()); + final String fkTableDB = normalizeIdentifier(fks.get(i).getFktable_db()); + final String fkTableName = normalizeIdentifier(fks.get(i).getFktable_name()); + final String fkColumnName = normalizeIdentifier(fks.get(i).getFkcolumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
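The hunks around this point all follow a single conversion: calls to HiveStringUtils.normalizeIdentifier(...) become bare normalizeIdentifier(...) calls, so ObjectStore no longer depends on the shared Hive utility class. The patch does not show where the replacement helper comes from (most likely a static import from the standalone metastore's own string utilities), so the sketch below is an assumption about its shape rather than a quote from this change; it only illustrates the normalization the converted call sites rely on.

    // Assumed shape of the standalone-metastore helper used by the converted call sites.
    // The class name and package are placeholders; only the behavior (trim + lower-case)
    // is inferred from how db, table, column and constraint names are compared elsewhere.
    public final class IdentifierUtils {
      private IdentifierUtils() {}

      public static String normalizeIdentifier(String identifier) {
        // Metastore identifiers are case-insensitive, so normalize once at the boundary.
        return identifier == null ? null : identifier.trim().toLowerCase();
      }
    }

    // Converted call sites then read exactly like the hunks above:
    //   dbName = normalizeIdentifier(dbName);
    //   tableName = normalizeIdentifier(tableName);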
@@ -3940,7 +3934,7 @@ private List addForeignKeys( fkTableDB, fkTableName, pkTableDB, pkTableName, pkColumnName, fkColumnName, "fk"); } } else { - currentConstraintName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFk_name()); + currentConstraintName = normalizeIdentifier(fks.get(i).getFk_name()); } fkNames.add(currentConstraintName); Integer updateRule = fks.get(i).getUpdate_rule(); @@ -3975,14 +3969,14 @@ public List addPrimaryKeys(List pks) throws InvalidObject private List addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List pkNames = new ArrayList(); - List mpks = new ArrayList(); + List pkNames = new ArrayList<>(); + List mpks = new ArrayList<>(); String constraintName = null; for (int i = 0; i < pks.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(pks.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(pks.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(pks.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(pks.get(i).getTable_db()); + final String tableName = normalizeIdentifier(pks.get(i).getTable_name()); + final String columnName = normalizeIdentifier(pks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. @@ -4009,7 +4003,7 @@ private List addPrimaryKeys(List pks, boolean retrieveCD) constraintName = generateConstraintName(tableDB, tableName, columnName, "pk"); } } else { - constraintName = HiveStringUtils.normalizeIdentifier(pks.get(i).getPk_name()); + constraintName = normalizeIdentifier(pks.get(i).getPk_name()); } pkNames.add(constraintName); int enableValidateRely = (pks.get(i).isEnable_cstr() ? 4 : 0) + @@ -4041,14 +4035,14 @@ public List addUniqueConstraints(List uks) private List addUniqueConstraints(List uks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List ukNames = new ArrayList(); - List cstrs = new ArrayList(); + List ukNames = new ArrayList<>(); + List cstrs = new ArrayList<>(); String constraintName = null; for (int i = 0; i < uks.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(uks.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(uks.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(uks.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(uks.get(i).getTable_db()); + final String tableName = normalizeIdentifier(uks.get(i).getTable_name()); + final String columnName = normalizeIdentifier(uks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
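Earlier hunks in this file show the configuration side of the split: every HiveConf.getVar/getBoolVar lookup is rewritten against MetastoreConf and its ConfVars (TRY_DIRECT_SQL, EXPRESSION_PROXY_CLASS, DBACCESS_SSL_PROPS, PWD, CACHE_PINOBJTYPES, DEFAULTPARTITIONNAME). Below is a small, self-contained sketch of that read pattern, using only calls that appear in this patch; the class is illustrative and the printed values are whatever the local metastore configuration resolves to, nothing here is taken from a real deployment.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class MetastoreConfReadSketch {
      public static void main(String[] args) {
        // Replaces the old "new HiveConf(ObjectStore.class)" pattern seen in getPMF().
        Configuration conf = MetastoreConf.newMetastoreConf();

        // Typed getters keyed by ConfVars replace HiveConf.getBoolVar/getVar.
        boolean tryDirectSql = MetastoreConf.getBoolVar(conf, ConfVars.TRY_DIRECT_SQL);
        String proxyClass = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);

        System.out.println("direct SQL: " + tryDirectSql + ", expression proxy: " + proxyClass);
      }
    }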
@@ -4069,7 +4063,7 @@ private List addUniqueConstraints(List uks, boolean constraintName = generateConstraintName(tableDB, tableName, columnName, "uk"); } } else { - constraintName = HiveStringUtils.normalizeIdentifier(uks.get(i).getUk_name()); + constraintName = normalizeIdentifier(uks.get(i).getUk_name()); } ukNames.add(constraintName); @@ -4102,14 +4096,14 @@ public List addNotNullConstraints(List nns) private List addNotNullConstraints(List nns, boolean retrieveCD) throws InvalidObjectException, MetaException { - List nnNames = new ArrayList(); - List cstrs = new ArrayList(); + List nnNames = new ArrayList<>(); + List cstrs = new ArrayList<>(); String constraintName = null; for (int i = 0; i < nns.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(nns.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(nns.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(nns.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); + final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); + final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. @@ -4128,7 +4122,7 @@ private List addNotNullConstraints(List nns, boole if (nns.get(i).getNn_name() == null) { constraintName = generateConstraintName(tableDB, tableName, columnName, "nn"); } else { - constraintName = HiveStringUtils.normalizeIdentifier(nns.get(i).getNn_name()); + constraintName = normalizeIdentifier(nns.get(i).getNn_name()); } nnNames.add(constraintName); @@ -4193,7 +4187,7 @@ private MIndex convertToMIndex(Index index) throws InvalidObjectException, "Underlying index table does not exist for the given index."); } - return new MIndex(HiveStringUtils.normalizeIdentifier(index.getIndexName()), origTable, index.getCreateTime(), + return new MIndex(normalizeIdentifier(index.getIndexName()), origTable, index.getCreateTime(), index.getLastAccessTime(), index.getParameters(), indexTable, msd, index.getIndexHandlerClass(), index.isDeferredRebuild()); } @@ -4224,8 +4218,8 @@ private MIndex getMIndex(String dbName, String originalTblName, String indexName Query query = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - originalTblName = HiveStringUtils.normalizeIdentifier(originalTblName); + dbName = normalizeIdentifier(dbName); + originalTblName = normalizeIdentifier(originalTblName); MTable mtbl = getMTable(dbName, originalTblName); if (mtbl == null) { commited = commitTransaction(); @@ -4238,7 +4232,7 @@ private MIndex getMIndex(String dbName, String originalTblName, String indexName query.setUnique(true); midx = (MIndex) query.execute(originalTblName, dbName, - HiveStringUtils.normalizeIdentifier(indexName)); + normalizeIdentifier(indexName)); pm.retrieve(midx); commited = commitTransaction(); } finally { @@ -4288,15 +4282,15 @@ public List getIndexes(String dbName, String origTableName, int max) LOG.debug("Executing getIndexes"); openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - origTableName = HiveStringUtils.normalizeIdentifier(origTableName); + dbName = normalizeIdentifier(dbName); + origTableName = normalizeIdentifier(origTableName); query = pm.newQuery(MIndex.class, "origTable.tableName == t1 && origTable.database.name == t2"); 
query.declareParameters("java.lang.String t1, java.lang.String t2"); List mIndexes = (List) query.execute(origTableName, dbName); pm.retrieveAll(mIndexes); - List indexes = new ArrayList(mIndexes.size()); + List indexes = new ArrayList<>(mIndexes.size()); for (MIndex mIdx : mIndexes) { indexes.add(this.convertToIndex(mIdx)); } @@ -4312,14 +4306,14 @@ public List getIndexes(String dbName, String origTableName, int max) @Override public List listIndexNames(String dbName, String origTableName, short max) throws MetaException { - List pns = new ArrayList(); + List pns = new ArrayList<>(); boolean success = false; Query query = null; try { openTransaction(); LOG.debug("Executing listIndexNames"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - origTableName = HiveStringUtils.normalizeIdentifier(origTableName); + dbName = normalizeIdentifier(dbName); + origTableName = normalizeIdentifier(origTableName); query = pm.newQuery("select indexName from org.apache.hadoop.hive.metastore.model.MIndex " + "where origTable.database.name == t1 && origTable.tableName == t2 " @@ -4537,7 +4531,7 @@ public boolean removeRole(String roleName) throws MetaException, */ private Set listAllRolesInHierarchy(String userName, List groupNames) { - List ret = new ArrayList(); + List ret = new ArrayList<>(); if(userName != null) { ret.addAll(listMRoles(userName, PrincipalType.USER)); } @@ -4547,7 +4541,7 @@ private Set listAllRolesInHierarchy(String userName, } } // get names of these roles and its ancestors - Set roleNames = new HashSet(); + Set roleNames = new HashSet<>(); getAllRoleAncestors(roleNames, ret); return roleNames; } @@ -4576,7 +4570,7 @@ public List listMRoles(String principalName, PrincipalType principalType) { boolean success = false; Query query = null; - List mRoleMember = new ArrayList(); + List mRoleMember = new ArrayList<>(); try { LOG.debug("Executing listRoles"); @@ -4599,7 +4593,10 @@ public List listMRoles(String principalName, if (principalType == PrincipalType.USER) { // All users belong to public role implicitly, add that role - MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC); + // TODO MS-SPLIT Change this back to HiveMetaStore.PUBLIC once HiveMetaStore has moved to + // stand-alone metastore. 
+ //MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC); + MRole publicRole = new MRole("public", 0, "public"); mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, null, null, false)); } @@ -4609,7 +4606,7 @@ public List listMRoles(String principalName, @Override public List listRoles(String principalName, PrincipalType principalType) { - List result = new ArrayList(); + List result = new ArrayList<>(); List roleMaps = listMRoles(principalName, principalType); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ -4624,7 +4621,7 @@ public List listRoles(String principalName, PrincipalType principalType) { @Override public List listRolesWithGrants(String principalName, PrincipalType principalType) { - List result = new ArrayList(); + List result = new ArrayList<>(); List roleMaps = listMRoles(principalName, principalType); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ -4708,7 +4705,7 @@ public List listRoleNames() { query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole"); query.setResult("roleName"); Collection names = (Collection) query.execute(); - List roleNames = new ArrayList(); + List roleNames = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { roleNames.add((String) i.next()); } @@ -4729,8 +4726,8 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, if (userName != null) { List user = this.listPrincipalMGlobalGrants(userName, PrincipalType.USER); if(user.size()>0) { - Map> userPriv = new HashMap>(); - List grantInfos = new ArrayList(user.size()); + Map> userPriv = new HashMap<>(); + List grantInfos = new ArrayList<>(user.size()); for (int i = 0; i < user.size(); i++) { MGlobalPrivilege item = user.get(i); grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item @@ -4742,12 +4739,12 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } } if (groupNames != null && groupNames.size() > 0) { - Map> groupPriv = new HashMap>(); + Map> groupPriv = new HashMap<>(); for(String groupName: groupNames) { List group = this.listPrincipalMGlobalGrants(groupName, PrincipalType.GROUP); if(group.size()>0) { - List grantInfos = new ArrayList(group.size()); + List grantInfos = new ArrayList<>(group.size()); for (int i = 0; i < group.size(); i++) { MGlobalPrivilege item = group.get(i); grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item @@ -4771,13 +4768,13 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, public List getDBPrivilege(String dbName, String principalName, PrincipalType principalType) throws InvalidObjectException, MetaException { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameDbPriv = this.listPrincipalMDBGrants( principalName, principalType, dbName); if (userNameDbPriv != null && userNameDbPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameDbPriv.size()); for (int i = 0; i < userNameDbPriv.size(); i++) { MDBPrivilege item = userNameDbPriv.get(i); @@ -4788,7 +4785,7 @@ public List getDBPrivilege(String dbName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } @@ -4797,19 +4794,19 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = 
normalizeIdentifier(dbName); PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); try { openTransaction(); if (userName != null) { - Map> dbUserPriv = new HashMap>(); + Map> dbUserPriv = new HashMap<>(); dbUserPriv.put(userName, getDBPrivilege(dbName, userName, PrincipalType.USER)); ret.setUserPrivileges(dbUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> dbGroupPriv = new HashMap>(); + Map> dbGroupPriv = new HashMap<>(); for (String groupName : groupNames) { dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName, PrincipalType.GROUP)); @@ -4818,7 +4815,7 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> dbRolePriv = new HashMap>(); + Map> dbRolePriv = new HashMap<>(); for (String roleName : roleNames) { dbRolePriv .put(roleName, getDBPrivilege(dbName, roleName, PrincipalType.ROLE)); @@ -4840,19 +4837,19 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { - Map> partUserPriv = new HashMap>(); + Map> partUserPriv = new HashMap<>(); partUserPriv.put(userName, getPartitionPrivilege(dbName, tableName, partition, userName, PrincipalType.USER)); ret.setUserPrivileges(partUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> partGroupPriv = new HashMap>(); + Map> partGroupPriv = new HashMap<>(); for (String groupName : groupNames) { partGroupPriv.put(groupName, getPartitionPrivilege(dbName, tableName, partition, groupName, PrincipalType.GROUP)); @@ -4861,7 +4858,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> partRolePriv = new HashMap>(); + Map> partRolePriv = new HashMap<>(); for (String roleName : roleNames) { partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, partition, roleName, PrincipalType.ROLE)); @@ -4883,19 +4880,19 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { - Map> tableUserPriv = new HashMap>(); + Map> tableUserPriv = new HashMap<>(); tableUserPriv.put(userName, getTablePrivilege(dbName, tableName, userName, PrincipalType.USER)); ret.setUserPrivileges(tableUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> tableGroupPriv = new HashMap>(); + Map> tableGroupPriv = new HashMap<>(); for (String groupName : groupNames) { tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, groupName, PrincipalType.GROUP)); @@ -4904,7 +4901,7 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - 
Map> tableRolePriv = new HashMap>(); + Map> tableRolePriv = new HashMap<>(); for (String roleName : roleNames) { tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, roleName, PrincipalType.ROLE)); @@ -4925,22 +4922,22 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); try { openTransaction(); if (userName != null) { - Map> columnUserPriv = new HashMap>(); + Map> columnUserPriv = new HashMap<>(); columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, columnName, partitionName, userName, PrincipalType.USER)); ret.setUserPrivileges(columnUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> columnGroupPriv = new HashMap>(); + Map> columnGroupPriv = new HashMap<>(); for (String groupName : groupNames) { columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, columnName, partitionName, groupName, PrincipalType.GROUP)); @@ -4949,7 +4946,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> columnRolePriv = new HashMap>(); + Map> columnRolePriv = new HashMap<>(); for (String roleName : roleNames) { columnRolePriv.put(roleName, getColumnPrivilege(dbName, tableName, columnName, partitionName, roleName, PrincipalType.ROLE)); @@ -4969,15 +4966,15 @@ private List getPartitionPrivilege(String dbName, String tableName, String partName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameTabPartPriv = this .listPrincipalMPartitionGrants(principalName, principalType, dbName, tableName, partName); if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); for (int i = 0; i < userNameTabPartPriv.size(); i++) { MPartitionPrivilege item = userNameTabPartPriv.get(i); @@ -4989,7 +4986,7 @@ private List getPartitionPrivilege(String dbName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } private PrincipalType getPrincipalTypeFromStr(String str) { @@ -4998,15 +4995,15 @@ private PrincipalType getPrincipalTypeFromStr(String str) { private List getTablePrivilege(String dbName, String tableName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameTabPartPriv = this .listAllMTableGrants(principalName, principalType, dbName, tableName); if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = 
new ArrayList<>( userNameTabPartPriv.size()); for (int i = 0; i < userNameTabPartPriv.size(); i++) { MTablePrivilege item = userNameTabPartPriv.get(i); @@ -5017,23 +5014,23 @@ private List getTablePrivilege(String dbName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } private List getColumnPrivilege(String dbName, String tableName, String columnName, String partitionName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); if (partitionName == null) { List userNameColumnPriv = this .listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameColumnPriv.size()); for (int i = 0; i < userNameColumnPriv.size(); i++) { MTableColumnPrivilege item = userNameColumnPriv.get(i); @@ -5048,7 +5045,7 @@ private List getColumnPrivilege(String dbName, .listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, partitionName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameColumnPriv.size()); for (int i = 0; i < userNameColumnPriv.size(); i++) { MPartitionColumnPrivilege item = userNameColumnPriv.get(i); @@ -5059,7 +5056,7 @@ private List getColumnPrivilege(String dbName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } @Override @@ -5069,13 +5066,13 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce int now = (int) (System.currentTimeMillis() / 1000); try { openTransaction(); - List persistentObjs = new ArrayList(); + List persistentObjs = new ArrayList<>(); List privilegeList = privileges.getPrivileges(); if (privilegeList != null && privilegeList.size() > 0) { Iterator privIter = privilegeList.iterator(); - Set privSet = new HashSet(); + Set privSet = new HashSet<>(); while (privIter.hasNext()) { HiveObjectPrivilege privDef = privIter.next(); HiveObjectRef hiveObject = privDef.getHiveObject(); @@ -5283,7 +5280,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) boolean committed = false; try { openTransaction(); - List persistentObjs = new ArrayList(); + List persistentObjs = new ArrayList<>(); List privilegeList = privileges.getPrivileges(); @@ -5525,7 +5522,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) public List listMRoleMembers(String roleName) { boolean success = false; Query query = null; - List mRoleMemeberList = new ArrayList(); + List mRoleMemeberList = new ArrayList<>(); try { LOG.debug("Executing listRoleMembers"); @@ -5549,7 +5546,7 @@ public List listMRoleMembers(String roleName) { @Override public List listRoleMembers(String roleName) { List roleMaps = listMRoleMembers(roleName); - List rolePrinGrantList = new ArrayList(); + List rolePrinGrantList = new ArrayList<>(); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ -5576,7 +5573,7 @@ public List listPrincipalMGlobalGrants(String principalName, PrincipalType principalType) { boolean commited = false; Query query = null; - List 
userNameDbPriv = new ArrayList(); + List userNameDbPriv = new ArrayList<>(); try { List mPrivs = null; openTransaction(); @@ -5603,9 +5600,9 @@ public List listPrincipalGlobalGrants(String principalName, List mUsers = listPrincipalMGlobalGrants(principalName, principalType); if (mUsers.isEmpty()) { - return Collections. emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mUsers.size(); i++) { MGlobalPrivilege sUsr = mUsers.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -5637,7 +5634,7 @@ public List listGlobalGrantsAll() { } private List convertGlobal(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MGlobalPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -5656,8 +5653,8 @@ public List listPrincipalMDBGrants(String principalName, PrincipalType principalType, String dbName) { boolean success = false; Query query = null; - List mSecurityDBList = new ArrayList(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + List mSecurityDBList = new ArrayList<>(); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listPrincipalDBGrants"); @@ -5686,9 +5683,9 @@ public List listPrincipalDBGrants(String principalName, String dbName) { List mDbs = listPrincipalMDBGrants(principalName, principalType, dbName); if (mDbs.isEmpty()) { - return Collections.emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mDbs.size(); i++) { MDBPrivilege sDB = mDbs.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -5725,7 +5722,7 @@ public List listDBGrantsAll(String dbName) { } private List convertDB(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MDBPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -5777,11 +5774,11 @@ private List listPrincipalAllDBGrant(String principalName, public List listAllTableGrants(String dbName, String tableName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - List mSecurityTabList = new ArrayList(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityTabList = new ArrayList<>(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listAllTableGrants"); @@ -5805,11 +5802,11 @@ public List listAllTableGrants(String dbName, String tableName) @SuppressWarnings("unchecked") public List listTableAllPartitionGrants(String dbName, String tableName) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = false; Query query = null; - List mSecurityTabPartList = new ArrayList(); + List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionGrants"); @@ -5834,9 +5831,9 @@ public List listTableAllPartitionGrants(String dbName, Stri public List listTableAllColumnGrants(String dbName, String tableName) { boolean 
success = false; Query query = null; - List mTblColPrivilegeList = new ArrayList(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + List mTblColPrivilegeList = new ArrayList<>(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listTableAllColumnGrants"); @@ -5863,9 +5860,9 @@ public List listTableAllPartitionColumnGrants(String String tableName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionColumnGrants"); @@ -5891,8 +5888,8 @@ public List listTableAllPartitionColumnGrants(String public List listPartitionAllColumnGrants(String dbName, String tableName, List partNames) { boolean success = false; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); List mSecurityColList = null; try { @@ -5923,7 +5920,7 @@ public void dropPartitionAllColumnGrantsNoTxn( @SuppressWarnings("unchecked") private List listDatabaseGrants(String dbName, QueryWrapper queryWrapper) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); boolean success = false; try { LOG.debug("Executing listDatabaseGrants"); @@ -5946,8 +5943,8 @@ private List listDatabaseGrants(String dbName, QueryWrapper queryW @SuppressWarnings("unchecked") private List listPartitionGrants(String dbName, String tableName, List partNames) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = false; List mSecurityTabPartList = null; @@ -5990,8 +5987,8 @@ private ObjectPair makeQueryByPartitionNames( String queryStr = tbCol + " == t1 && " + dbCol + " == t2"; String paramStr = "java.lang.String t1, java.lang.String t2"; Object[] params = new Object[2 + partNames.size()]; - params[0] = HiveStringUtils.normalizeIdentifier(tableName); - params[1] = HiveStringUtils.normalizeIdentifier(dbName); + params[0] = normalizeIdentifier(tableName); + params[1] = normalizeIdentifier(dbName); int index = 0; for (String partName : partNames) { params[index + 2] = partName; @@ -6002,18 +5999,18 @@ private ObjectPair makeQueryByPartitionNames( queryStr += ")"; Query query = pm.newQuery(clazz, queryStr); query.declareParameters(paramStr); - return new ObjectPair(query, params); + return new ObjectPair<>(query, params); } @SuppressWarnings("unchecked") public List listAllMTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = false; Query query = null; - List mSecurityTabPartList = new ArrayList(); + List mSecurityTabPartList = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listAllTableGrants"); @@ -6045,9 +6042,9 @@ public List listAllTableGrants(String principalName, 
List mTbls = listAllMTableGrants(principalName, principalType, dbName, tableName); if (mTbls.isEmpty()) { - return Collections. emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mTbls.size(); i++) { MTablePrivilege sTbl = mTbls.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6068,9 +6065,9 @@ public List listPrincipalMPartitionGrants( String tableName, String partName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - List mSecurityTabPartList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionGrants"); @@ -6107,9 +6104,9 @@ public List listPrincipalPartitionGrants(String principalNa List mParts = listPrincipalMPartitionGrants(principalName, principalType, dbName, tableName, partName); if (mParts.isEmpty()) { - return Collections. emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mParts.size(); i++) { MPartitionPrivilege sPart = mParts.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6132,10 +6129,10 @@ public List listPrincipalMTableColumnGrants( String tableName, String columnName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); - List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalTableColumnGrants"); @@ -6172,7 +6169,7 @@ public List listPrincipalTableColumnGrants(String principal if (mTableCols.isEmpty()) { return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mTableCols.size(); i++) { MTableColumnPrivilege sCol = mTableCols.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6194,10 +6191,10 @@ public List listPrincipalMPartitionColumnGrants( String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); - List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionColumnGrants"); @@ -6237,7 +6234,7 @@ public List listPrincipalPartitionColumnGrants(String princ if (mPartitionCols.isEmpty()) { return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mPartitionCols.size(); i++) { MPartitionColumnPrivilege sCol = mPartitionCols.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6313,7 +6310,7 @@ public List listPartitionColumnGrantsAll(String dbName, Str } private List convertPartCols(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for 
(MPartitionColumnPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6391,8 +6388,8 @@ public List listPrincipalTableGrantsAll(String principalNam public List listTableGrantsAll(String dbName, String tableName) { boolean success = false; Query query = null; - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); try { openTransaction(); LOG.debug("Executing listTableGrantsAll"); @@ -6413,7 +6410,7 @@ public List listTableGrantsAll(String dbName, String tableN } private List convertTable(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MTablePrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6511,7 +6508,7 @@ public List listPartitionGrantsAll(String dbName, String ta } private List convertPartition(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MPartitionPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6593,8 +6590,8 @@ public List listTableColumnGrantsAll(String dbName, String String columnName) { boolean success = false; Query query = null; - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); try { openTransaction(); LOG.debug("Executing listPrincipalTableColumnGrantsAll"); @@ -6616,7 +6613,7 @@ public List listTableColumnGrantsAll(String dbName, String } private List convertTableCols(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MTableColumnPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6721,7 +6718,7 @@ private String getPartitionStr(Table tbl, Map partName) throws In throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() + " doesn't match with number of supplied partition values: "+partName.size()); } - final List storedVals = new ArrayList(tbl.getPartitionKeysSize()); + final List storedVals = new ArrayList<>(tbl.getPartitionKeysSize()); for(FieldSchema partKey : tbl.getPartitionKeys()){ String partVal = partName.get(partKey.getName()); if(null == partVal) { @@ -6796,7 +6793,7 @@ public long executeJDOQLUpdate(String queryStr) { public Set listFSRoots() { boolean committed = false; Query query = null; - Set fsRoots = new HashSet(); + Set fsRoots = new HashSet<>(); try { openTransaction(); query = pm.newQuery(MDatabase.class); @@ -6888,8 +6885,8 @@ public void setUpdateLocations(Map updateLocations) { public UpdateMDatabaseURIRetVal updateMDatabaseURI(URI oldLoc, URI newLoc, boolean dryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdateMDatabaseURIRetVal retVal = null; try { openTransaction(); @@ -7027,8 +7024,8 @@ public UpdatePropURIRetVal updateMStorageDescriptorTblPropURI(URI oldLoc, URI ne String tblPropKey, boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new 
HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdatePropURIRetVal retVal = null; try { openTransaction(); @@ -7097,8 +7094,8 @@ public UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI(URI o URI newLoc, boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); int numNullRecords = 0; UpdateMStorageDescriptorTblURIRetVal retVal = null; try { @@ -7177,8 +7174,8 @@ public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, String serdeP boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdateSerdeURIRetVal retVal = null; try { openTransaction(); @@ -7493,10 +7490,9 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName protected ColumnStatistics getTableColumnStatisticsInternal( String dbName, String tableName, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); - return new GetStatHelper(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), allowSql, allowJdo) { + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); + return new GetStatHelper(normalizeIdentifier(dbName), + normalizeIdentifier(tableName), allowSql, allowJdo) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { return directSql.getTableStats(dbName, tblName, colNames, enableBitVector); @@ -7512,7 +7508,7 @@ protected ColumnStatistics getJdoResult( // LastAnalyzed is stored per column, but thrift object has it per multiple columns. // Luckily, nobody actually uses it, so we will set to lowest value of all columns for now. 
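// Illustrative sketch, not part of the diff above: the ObjectStore hunks in this
// file repeatedly make two mechanical changes -- raw collection types become
// diamond-operator instantiations, and HiveStringUtils.normalizeIdentifier is
// replaced by a statically imported normalizeIdentifier from the relocated
// metastore string utilities. The helper below is a hypothetical stand-in for
// that utility (assumed to trim and lower-case identifiers), shown only to make
// the intent of the repeated edits concrete.
import java.util.ArrayList;
import java.util.List;

class IdentifierNormalizationSketch {
  // Hypothetical stand-in for the relocated normalizeIdentifier utility.
  static String normalizeIdentifier(String identifier) {
    return identifier == null ? null : identifier.trim().toLowerCase();
  }

  static List<String> normalizeAll(List<String> names) {
    // Diamond operator: the element type is inferred from the declared type.
    List<String> normalized = new ArrayList<>(names.size());
    for (String name : names) {
      normalized.add(normalizeIdentifier(name));
    }
    return normalized;
  }
}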
ColumnStatisticsDesc desc = StatObjectConverter.getTableColumnStatisticsDesc(mStats.get(0)); - List statObjs = new ArrayList(mStats.size()); + List statObjs = new ArrayList<>(mStats.size()); for (MTableColumnStatistics mStat : mStats) { if (desc.getLastAnalyzed() > mStat.getLastAnalyzed()) { desc.setLastAnalyzed(mStat.getLastAnalyzed()); @@ -7538,8 +7534,7 @@ public List getPartitionColumnStatistics(String dbName, String protected List getPartitionColumnStatisticsInternal( String dbName, String tableName, final List partNames, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetListHelper(dbName, tableName, allowSql, allowJdo) { @Override protected List getSqlResult( @@ -7553,7 +7548,7 @@ protected List getJdoResult( try { List mStats = getMPartitionColumnStatistics(getTable(), partNames, colNames, queryWrapper); - List result = new ArrayList( + List result = new ArrayList<>( Math.min(mStats.size(), partNames.size())); String lastPartName = null; List curList = null; @@ -7561,7 +7556,7 @@ protected List getJdoResult( for (int i = 0; i <= mStats.size(); ++i) { boolean isLast = i == mStats.size(); MPartitionColumnStatistics mStatsObj = isLast ? null : mStats.get(i); - String partName = isLast ? null : (String)mStatsObj.getPartitionName(); + String partName = isLast ? null : mStatsObj.getPartitionName(); if (isLast || !partName.equals(lastPartName)) { if (i != 0) { result.add(new ColumnStatistics(csd, curList)); @@ -7570,7 +7565,7 @@ protected List getJdoResult( continue; } csd = StatObjectConverter.getPartitionColumnStatisticsDesc(mStatsObj); - curList = new ArrayList(colNames.size()); + curList = new ArrayList<>(colNames.size()); } curList.add(StatObjectConverter.getPartitionColumnStatisticsObj(mStatsObj, enableBitVector)); lastPartName = partName; @@ -7588,12 +7583,10 @@ protected List getJdoResult( @Override public AggrStats get_aggr_stats_for(String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { - final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION); - final double ndvTuner = HiveConf.getFloatVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER); - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), + ConfVars.STATS_NDV_DENSITY_FUNCTION); + final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetHelper(dbName, tblName, true, false) { @Override protected AggrStats getSqlResult(GetHelper ctx) @@ -7619,8 +7612,7 @@ protected String describeResult() { @Override public Map> getColStatsForTablePartitions(String dbName, String tableName) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetHelper>>(dbName, tableName, true, false) { 
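// Illustrative sketch, not part of the diff above: getJdoResult in
// getPartitionColumnStatisticsInternal folds a list of per-column partition
// statistics, already ordered by partition name, into one ColumnStatistics per
// partition by running one extra "sentinel" iteration to flush the final group.
// The same loop shape in isolation, with plain strings standing in for the
// MPartitionColumnStatistics objects (names here are hypothetical):
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class GroupBySentinelSketch {
  /** rows holds {partitionName, columnStat} pairs sorted by partition name. */
  static Map<String, List<String>> groupByPartition(List<String[]> rows) {
    Map<String, List<String>> grouped = new LinkedHashMap<>();
    String lastKey = null;
    List<String> current = null;
    for (int i = 0; i <= rows.size(); ++i) {
      boolean isLast = i == rows.size();
      String key = isLast ? null : rows.get(i)[0];
      if (isLast || !key.equals(lastKey)) {
        if (current != null) {
          grouped.put(lastKey, current); // flush the group that just ended
        }
        if (isLast) {
          continue;                      // sentinel pass only flushes
        }
        current = new ArrayList<>();     // start a group for the new key
      }
      current.add(rows.get(i)[1]);
      lastKey = key;
    }
    return grouped;
  }
}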
@Override protected Map> getSqlResult( @@ -7762,9 +7754,9 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, query.setUnique(true); mStatsObj = (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(colName)); + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(colName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { pm.deletePersistent(mStatsObj); @@ -7775,8 +7767,8 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, } else { mStatsObjColl = (List) query.execute(partName.trim(), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + normalizeIdentifier(dbName), + normalizeIdentifier(tableName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -7831,9 +7823,9 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri if (colName != null) { query.setUnique(true); mStatsObj = - (MTableColumnStatistics) query.execute(HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(colName)); + (MTableColumnStatistics) query.execute(normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(colName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { @@ -7845,8 +7837,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri } else { mStatsObjColl = (List) query.execute( - HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(dbName)); + normalizeIdentifier(tableName), + normalizeIdentifier(dbName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -7872,8 +7864,7 @@ public long cleanupEvents() { long delCnt; LOG.debug("Begin executing cleanupEvents"); Long expiryTime = - HiveConf.getTimeVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION, - TimeUnit.MILLISECONDS); + MetastoreConf.getTimeVar(getConf(), ConfVars.EVENT_EXPIRY_DURATION, TimeUnit.MILLISECONDS); Long curTime = System.currentTimeMillis(); try { openTransaction(); @@ -7971,7 +7962,7 @@ public List getAllTokenIdentifiers() { LOG.debug("Begin executing getAllTokenIdentifiers"); boolean committed = false; Query query = null; - List tokenIdents = new ArrayList(); + List tokenIdents = new ArrayList<>(); try { openTransaction(); @@ -8107,8 +8098,7 @@ private synchronized void checkSchema() throws MetaException { return; } - boolean strictValidation = - HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); + boolean strictValidation = MetastoreConf.getBoolVar(getConf(), ConfVars.SCHEMA_VERIFICATION); // read the schema version stored in metastore db String dbSchemaVer = getMetaStoreSchemaVersion(); // version of schema for this version of hive @@ -8120,7 +8110,7 @@ private synchronized void checkSchema() throws MetaException { throw new MetaException("Version information not found in metastore. "); } else { LOG.warn("Version information not found in metastore. 
" - + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString() + + + ConfVars.SCHEMA_VERIFICATION.toString() + " is not enabled so recording the schema version " + hiveSchemaVer); setMetaStoreSchemaVersion(hiveSchemaVer, @@ -8138,8 +8128,7 @@ private synchronized void checkSchema() throws MetaException { } else { LOG.error("Version information found in metastore differs " + dbSchemaVer + " from expected schema version " + hiveSchemaVer + - ". Schema verififcation is disabled " + - HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); + ". Schema verififcation is disabled " + ConfVars.SCHEMA_VERIFICATION); setMetaStoreSchemaVersion(hiveSchemaVer, "Set by MetaStore " + USER + "@" + HOSTNAME); } @@ -8166,7 +8155,7 @@ public String getMetaStoreSchemaVersion() throws MetaException { private MVersionTable getMSchemaVersion() throws NoSuchObjectException, MetaException { boolean committed = false; Query query = null; - List mVerTables = new ArrayList(); + List mVerTables = new ArrayList<>(); try { openTransaction(); query = pm.newQuery(MVersionTable.class); @@ -8205,7 +8194,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro MVersionTable mSchemaVer; boolean commited = false; boolean recordVersion = - HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION); + MetastoreConf.getBoolVar(getConf(), ConfVars.SCHEMA_VERIFICATION_RECORD_VERSION); if (!recordVersion) { LOG.warn("setMetaStoreSchemaVersion called but recording version is disabled: " + "version = " + schemaVersion + ", comment = " + comment); @@ -8317,7 +8306,7 @@ private MFunction convertToMFunction(Function func) throws InvalidObjectExceptio private List convertToResourceUriList(List mresourceUriList) { List resourceUriList = null; if (mresourceUriList != null) { - resourceUriList = new ArrayList(mresourceUriList.size()); + resourceUriList = new ArrayList<>(mresourceUriList.size()); for (MResourceUri mres : mresourceUriList) { resourceUriList.add( new ResourceUri(ResourceType.findByValue(mres.getResourceType()), mres.getUri())); @@ -8329,7 +8318,7 @@ private List convertToResourceUriList(List mresourceU private List convertToMResourceUriList(List resourceUriList) { List mresourceUriList = null; if (resourceUriList != null) { - mresourceUriList = new ArrayList(resourceUriList.size()); + mresourceUriList = new ArrayList<>(resourceUriList.size()); for (ResourceUri res : resourceUriList) { mresourceUriList.add(new MResourceUri(res.getResourceType().getValue(), res.getUri())); } @@ -8358,8 +8347,8 @@ public void alterFunction(String dbName, String funcName, Function newFunction) boolean success = false; try { openTransaction(); - funcName = HiveStringUtils.normalizeIdentifier(funcName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + funcName = normalizeIdentifier(funcName); + dbName = normalizeIdentifier(dbName); MFunction newf = convertToMFunction(newFunction); if (newf == null) { throw new InvalidObjectException("new function is invalid"); @@ -8371,7 +8360,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) } // For now only alter name, owner, class name, type - oldf.setFunctionName(HiveStringUtils.normalizeIdentifier(newf.getFunctionName())); + oldf.setFunctionName(normalizeIdentifier(newf.getFunctionName())); oldf.setDatabase(newf.getDatabase()); oldf.setOwnerName(newf.getOwnerName()); oldf.setOwnerType(newf.getOwnerType()); @@ -8413,8 +8402,8 @@ private MFunction getMFunction(String db, String function) { Query query = null; 
try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); - function = HiveStringUtils.normalizeIdentifier(function); + db = normalizeIdentifier(db); + function = normalizeIdentifier(function); query = pm.newQuery(MFunction.class, "functionName == function && database.name == db"); query.declareParameters("java.lang.String function, java.lang.String db"); query.setUnique(true); @@ -8467,7 +8456,7 @@ public List getFunctions(String dbName, String pattern) throws MetaExcep List funcs = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); // Take the pattern and split it on the | to get all the composing // patterns List parameterVals = new ArrayList<>(); @@ -8480,7 +8469,7 @@ public List getFunctions(String dbName, String pattern) throws MetaExcep query.setResult("functionName"); query.setOrdering("functionName ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - funcs = new ArrayList(); + funcs = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { funcs.add((String) i.next()); } @@ -8856,8 +8845,8 @@ protected List getPrimaryKeysInternal(final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -8885,7 +8874,7 @@ private List getPrimaryKeysViaJdo(String db_name, String tbl_name query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - primaryKeys = new ArrayList(); + primaryKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); @@ -8981,7 +8970,7 @@ private List getForeignKeysViaJdo(String parent_db_name, List foreignKeys = null; Collection constraints = null; Query query = null; - Map tblToConstraint = new HashMap(); + Map tblToConstraint = new HashMap<>(); try { openTransaction(); String queryText = (parent_tbl_name != null ? 
"parentTable.tableName == parent_tbl_name && " : "") @@ -9000,7 +8989,7 @@ private List getForeignKeysViaJdo(String parent_db_name, paramText = paramText.substring(0, paramText.length()-1); } query.declareParameters(paramText); - List params = new ArrayList(); + List params = new ArrayList<>(); if (parent_tbl_name != null) { params.add(parent_tbl_name); } @@ -9026,7 +9015,7 @@ private List getForeignKeysViaJdo(String parent_db_name, params.get(2), params.get(3)); } pm.retrieveAll(constraints); - foreignKeys = new ArrayList(); + foreignKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPKFK = (MConstraint) i.next(); int enableValidateRely = currPKFK.getEnableValidateRely(); @@ -9076,8 +9065,8 @@ public List getUniqueConstraints(String db_name, String tbl protected List getUniqueConstraintsInternal(final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -9107,7 +9096,7 @@ private List getUniqueConstraintsViaJdo(String db_name, Str query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - uniqueConstraints = new ArrayList(); + uniqueConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); @@ -9145,8 +9134,8 @@ public List getNotNullConstraints(String db_name, String t protected List getNotNullConstraintsInternal(final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -9176,7 +9165,7 @@ private List getNotNullConstraintsViaJdo(String db_name, S query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - notNullConstraints = new ArrayList(); + notNullConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java index b94063e899da..3a7ee93cf838 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -49,7 +49,8 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express try { filter = 
expressionProxy.convertExprToFilter(expr); } catch (MetaException ex) { - // TODO - for now we have construct this by reflection because IMetaStoreClient can't be + // TODO MS-SPLIT - for now we have construct this by reflection because IMetaStoreClient + // can't be // moved until after HiveMetaStore is moved, which can't be moved until this is moved. Class exClass = JavaUtils.getClass( "org.apache.hadoop.hive.metastore.IMetaStoreClient.IncompatibleMetastoreException", diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java similarity index 72% rename from metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 2bc4d99a716d..0e6d8a4da8af 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +26,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -76,10 +76,10 @@ public interface RawStore extends Configurable { */ @Target(value = ElementType.METHOD) @Retention(value = RetentionPolicy.RUNTIME) - public @interface CanNotRetry { + @interface CanNotRetry { } - public abstract void shutdown(); + void shutdown(); /** * Opens a new one or the one already created Every call of this function must @@ -88,7 +88,7 @@ public interface RawStore extends Configurable { * @return an active transaction */ - public abstract boolean openTransaction(); + boolean openTransaction(); /** * if this is the commit of the first open call then an actual commit is @@ -97,77 +97,77 @@ public interface RawStore extends Configurable { * @return true or false */ @CanNotRetry - public abstract boolean commitTransaction(); + boolean commitTransaction(); - public boolean isActiveTransaction(); + boolean isActiveTransaction(); /** * Rolls back the current transaction if it is active */ @CanNotRetry - public abstract void rollbackTransaction(); + void rollbackTransaction(); - public abstract void createDatabase(Database db) + void createDatabase(Database db) throws InvalidObjectException, MetaException; - public abstract Database getDatabase(String name) + Database getDatabase(String name) throws NoSuchObjectException; - public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; - public abstract boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; - public abstract List getDatabases(String pattern) throws MetaException; + List getDatabases(String pattern) throws MetaException; - public abstract List getAllDatabases() throws MetaException; + List getAllDatabases() throws MetaException; - 
public abstract boolean createType(Type type); + boolean createType(Type type); - public abstract Type getType(String typeName); + Type getType(String typeName); - public abstract boolean dropType(String typeName); + boolean dropType(String typeName); - public abstract void createTable(Table tbl) throws InvalidObjectException, + void createTable(Table tbl) throws InvalidObjectException, MetaException; - public abstract boolean dropTable(String dbName, String tableName) + boolean dropTable(String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract Table getTable(String dbName, String tableName) + Table getTable(String dbName, String tableName) throws MetaException; - public abstract boolean addPartition(Partition part) + boolean addPartition(Partition part) throws InvalidObjectException, MetaException; - public abstract boolean addPartitions(String dbName, String tblName, List parts) + boolean addPartitions(String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; - public abstract boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) + boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - public abstract Partition getPartition(String dbName, String tableName, + Partition getPartition(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - public abstract boolean doesPartitionExist(String dbName, String tableName, + boolean doesPartitionExist(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - public abstract boolean dropPartition(String dbName, String tableName, + boolean dropPartition(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract List getPartitions(String dbName, + List getPartitions(String dbName, String tableName, int max) throws MetaException, NoSuchObjectException; - public abstract void alterTable(String dbname, String name, Table newTable) + void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; - public List getTables(String dbName, String pattern) + List getTables(String dbName, String pattern) throws MetaException; - public List getTables(String dbName, String pattern, TableType tableType) + List getTables(String dbName, String pattern, TableType tableType) throws MetaException; - public List getTableMeta( + List getTableMeta( String dbNames, String tableNames, List tableTypes) throws MetaException; /** @@ -180,10 +180,10 @@ public List getTableMeta( * If there are duplicate names, only one instance of the table will be returned * @throws MetaException */ - public List
getTableObjectsByName(String dbname, List tableNames) + List
getTableObjectsByName(String dbname, List tableNames) throws MetaException, UnknownDBException; - public List getAllTables(String dbName) throws MetaException; + List getAllTables(String dbName) throws MetaException; /** * Gets a list of tables based on a filter string and filter type. @@ -197,127 +197,127 @@ public List
getTableObjectsByName(String dbname, List tableNames) * @throws MetaException * @throws UnknownDBException */ - public abstract List listTableNamesByFilter(String dbName, + List listTableNamesByFilter(String dbName, String filter, short max_tables) throws MetaException, UnknownDBException; - public abstract List listPartitionNames(String db_name, + List listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException; - public abstract PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, - List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException; + PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + List cols, boolean applyDistinct, String filter, boolean ascending, + List order, long maxParts) throws MetaException; - public abstract List listPartitionNamesByFilter(String db_name, + List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException; - public abstract void alterPartition(String db_name, String tbl_name, List part_vals, + void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; - public abstract void alterPartitions(String db_name, String tbl_name, + void alterPartitions(String db_name, String tbl_name, List> part_vals_list, List new_parts) throws InvalidObjectException, MetaException; - public abstract boolean addIndex(Index index) + boolean addIndex(Index index) throws InvalidObjectException, MetaException; - public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; + Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; + boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract List getIndexes(String dbName, + List getIndexes(String dbName, String origTableName, int max) throws MetaException; - public abstract List listIndexNames(String dbName, + List listIndexNames(String dbName, String origTableName, short max) throws MetaException; - public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws InvalidObjectException, MetaException; - public abstract List getPartitionsByFilter( + List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; - public abstract boolean getPartitionsByExpr(String dbName, String tblName, + boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException; - public abstract int getNumPartitionsByFilter(String dbName, String tblName, String filter) + int getNumPartitionsByFilter(String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException; - public abstract int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException; + int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException; - public abstract List getPartitionsByNames( + List getPartitionsByNames( String dbName, String tblName, List partNames) throws MetaException, 
NoSuchObjectException; - public abstract Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - public abstract boolean addRole(String rowName, String ownerName) + boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; - public abstract boolean grantRole(Role role, String userName, PrincipalType principalType, + boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) throws MetaException, NoSuchObjectException; - public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract List listPrincipalGlobalGrants(String principalName, + List listPrincipalGlobalGrants(String principalName, PrincipalType principalType); - public abstract List listPrincipalDBGrants(String principalName, + List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName); - public abstract List listAllTableGrants( + List listAllTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName); - public 
abstract List listPrincipalPartitionGrants( + List listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName); - public abstract List listPrincipalTableColumnGrants( + List listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String columnName); - public abstract List listPrincipalPartitionColumnGrants( + List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName, String columnName); - public abstract boolean grantPrivileges (PrivilegeBag privileges) + boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) + boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract org.apache.hadoop.hive.metastore.api.Role getRole( + org.apache.hadoop.hive.metastore.api.Role getRole( String roleName) throws NoSuchObjectException; - public List listRoleNames(); + List listRoleNames(); - public List listRoles(String principalName, + List listRoles(String principalName, PrincipalType principalType); - public List listRolesWithGrants(String principalName, + List listRolesWithGrants(String principalName, PrincipalType principalType); @@ -326,14 +326,14 @@ public List listRolesWithGrants(String principalName, * @param roleName * @return */ - public List listRoleMembers(String roleName); + List listRoleMembers(String roleName); - public abstract Partition getPartitionWithAuth(String dbName, String tblName, + Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract List getPartitionsWithAuth(String dbName, + List getPartitionsWithAuth(String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; @@ -352,7 +352,7 @@ public abstract List getPartitionsWithAuth(String dbName, * @throws MetaException * @throws NoSuchObjectException */ - public abstract List listPartitionNamesPs(String db_name, String tbl_name, + List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException; @@ -377,7 +377,7 @@ public abstract List listPartitionNamesPs(String db_name, String tbl_nam * @throws NoSuchObjectException * @throws InvalidObjectException */ - public abstract List listPartitionsPsWithAuth(String db_name, String tbl_name, + List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; @@ -389,7 +389,7 @@ public abstract List listPartitionsPsWithAuth(String db_name, String * @throws InvalidObjectException * @throws InvalidInputException */ - public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) + boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** Persists the given column statistics object to the metastore @@ -402,7 +402,7 @@ public abstract boolean 
updateTableColumnStatistics(ColumnStatistics colStats) * @throws InvalidObjectException * @throws InvalidInputException */ - public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -418,14 +418,14 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb * @throws MetaException * */ - public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; /** * Returns the relevant column statistics for given columns in given partitions in a given * table in a given database if such statistics exist. */ - public abstract List getPartitionColumnStatistics( + List getPartitionColumnStatistics( String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; @@ -446,7 +446,7 @@ public abstract List getPartitionColumnStatistics( * @throws InvalidInputException */ - public abstract boolean deletePartitionColumnStatistics(String dbName, String tableName, + boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -465,34 +465,34 @@ public abstract boolean deletePartitionColumnStatistics(String dbName, String ta * @throws InvalidInputException */ - public abstract boolean deleteTableColumnStatistics(String dbName, String tableName, + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; - public abstract long cleanupEvents(); + long cleanupEvents(); - public abstract boolean addToken(String tokenIdentifier, String delegationToken); + boolean addToken(String tokenIdentifier, String delegationToken); - public abstract boolean removeToken(String tokenIdentifier); + boolean removeToken(String tokenIdentifier); - public abstract String getToken(String tokenIdentifier); + String getToken(String tokenIdentifier); - public abstract List getAllTokenIdentifiers(); + List getAllTokenIdentifiers(); - public abstract int addMasterKey(String key) throws MetaException; + int addMasterKey(String key) throws MetaException; - public abstract void updateMasterKey(Integer seqNo, String key) + void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException; - public abstract boolean removeMasterKey(Integer keySeq); + boolean removeMasterKey(Integer keySeq); - public abstract String[] getMasterKeys(); + String[] getMasterKeys(); - public abstract void verifySchema() throws MetaException; + void verifySchema() throws MetaException; - public abstract String getMetaStoreSchemaVersion() throws MetaException; + String getMetaStoreSchemaVersion() throws MetaException; - public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; + abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; void dropPartitions(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; @@ -533,7 +533,7 @@ List listTableColumnGrantsAll( * @throws InvalidObjectException * @throws MetaException */ - public void 
createFunction(Function func) + void createFunction(Function func) throws InvalidObjectException, MetaException; /** @@ -544,7 +544,7 @@ public void createFunction(Function func) * @throws InvalidObjectException * @throws MetaException */ - public void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; /** @@ -556,7 +556,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) * @throws InvalidObjectException * @throws InvalidInputException */ - public void dropFunction(String dbName, String funcName) + void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; /** @@ -566,14 +566,14 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public Function getFunction(String dbName, String funcName) throws MetaException; + Function getFunction(String dbName, String funcName) throws MetaException; /** * Retrieve all functions. * @return * @throws MetaException */ - public List getAllFunctions() throws MetaException; + List getAllFunctions() throws MetaException; /** * Retrieve list of function names based on name pattern. @@ -582,9 +582,9 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public List getFunctions(String dbName, String pattern) throws MetaException; + List getFunctions(String dbName, String pattern) throws MetaException; - public AggrStats get_aggr_stats_for(String dbName, String tblName, + AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** @@ -597,7 +597,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * @throws MetaException * @throws NoSuchObjectException */ - public Map> getColStatsForTablePartitions(String dbName, + Map> getColStatsForTablePartitions(String dbName, String tableName) throws MetaException, NoSuchObjectException; /** @@ -605,20 +605,20 @@ public Map> getColStatsForTablePartitions(Stri * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId */ - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst); + NotificationEventResponse getNextNotification(NotificationEventRequest rqst); /** * Add a notification entry. This should only be called from inside the metastore * @param event the notification to add */ - public void addNotificationEvent(NotificationEvent event); + void addNotificationEvent(NotificationEvent event); /** * Remove older notification events. * @param olderThan Remove any events older than a given number of seconds */ - public void cleanNotificationEvents(int olderThan); + void cleanNotificationEvents(int olderThan); /** * Get the last issued notification event id. This is intended for use by the export command @@ -626,7 +626,7 @@ public Map> getColStatsForTablePartitions(Stri * and determine which notification events happened before or after the export. * @return */ - public CurrentNotificationEventId getCurrentNotificationEventId(); + CurrentNotificationEventId getCurrentNotificationEventId(); /** * Get the number of events corresponding to given database with fromEventId. 
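// Illustrative sketch, not part of the diff above: the RawStore changes in this
// file drop only modifiers that the language already implies. Members of an
// interface are implicitly public, methods are implicitly abstract, and a
// nested annotation type is implicitly public and static, so the explicit and
// the trimmed declarations below are equivalent. Member names are taken from RawStore.
interface ModifierSketch {
  // Explicit form, as in the pre-move RawStore: legal but redundant.
  public abstract void shutdown();

  // Equivalent trimmed form, as in the relocated RawStore.
  void flushCache();

  // Nested annotation type; "public" and "static" would be redundant here too.
  @interface CanNotRetry {
  }
}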
@@ -639,7 +639,7 @@ public Map> getColStatsForTablePartitions(Stri * Flush any catalog objects held by the metastore implementation. Note that this does not * flush statistics objects. This should be called at the beginning of each query. */ - public void flushCache(); + void flushCache(); /** * @param fileIds List of file IDs from the filesystem. @@ -699,7 +699,7 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] @InterfaceStability.Evolving int getDatabaseCount() throws MetaException; - public abstract List getPrimaryKeys(String db_name, + List getPrimaryKeys(String db_name, String tbl_name) throws MetaException; /** @@ -714,14 +714,14 @@ public abstract List getPrimaryKeys(String db_name, * matches the arguments the results here will be all mixed together into a single list. * @throws MetaException if something goes wrong. */ - public abstract List getForeignKeys(String parent_db_name, + List getForeignKeys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException; - public abstract List getUniqueConstraints(String db_name, + List getUniqueConstraints(String db_name, String tbl_name) throws MetaException; - public abstract List getNotNullConstraints(String db_name, + List getNotNullConstraints(String db_name, String tbl_name) throws MetaException; List createTableWithConstraints(Table tbl, List primaryKeys, diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java index 45ed1e70e7d6..2e92a4f4e128 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java similarity index 85% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index aaeb6d476850..ab6b90fb6bad 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,13 @@ import java.util.List; import java.util.regex.Pattern; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; public class CacheUtils { private static final String delimit = "\u0001"; @@ -79,7 +78,7 @@ public static Object[] splitPartitionColStats(String key) { String[] comps = key.split(delimit); result[0] = comps[0]; result[1] = comps[1]; - List vals = new ArrayList(); + List vals = new ArrayList<>(); for (int i=2;i()); + sdCopy.setBucketCols(new ArrayList<>()); } if (sdCopy.getSortCols()==null) { - sdCopy.setSortCols(new ArrayList()); + sdCopy.setSortCols(new ArrayList<>()); } if (sdCopy.getSkewedInfo()==null) { - sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList(), - new ArrayList>(), new HashMap,String>())); + sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<>(), + new ArrayList<>(), new HashMap<>())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -114,14 +113,14 @@ static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) { if (wrapper.getSdHash()!=null) { StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy(); if (sdCopy.getBucketCols()==null) { - sdCopy.setBucketCols(new ArrayList()); + sdCopy.setBucketCols(new ArrayList<>()); } if (sdCopy.getSortCols()==null) { - sdCopy.setSortCols(new ArrayList()); + sdCopy.setSortCols(new ArrayList<>()); } if (sdCopy.getSkewedInfo()==null) { - sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList(), - new ArrayList>(), new HashMap,String>())); + sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<>(), + new ArrayList<>(), new HashMap<>())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -135,7 +134,7 @@ public static boolean matches(String name, String pattern) { for (String subpattern : subpatterns) { subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*") .replaceAll("\\^", "\\\\^").replaceAll("\\$", "\\\\$"); - if (Pattern.matches(subpattern, HiveStringUtils.normalizeIdentifier(name))) { + if (Pattern.matches(subpattern, StringUtils.normalizeIdentifier(name))) { return true; } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java similarity index 95% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index edc8e14f30c4..30aa9bd18e77 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,13 +32,9 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.PartFilterExprUtil; import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; @@ -86,8 +82,12 @@ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -216,12 +216,11 @@ public static void initSharedCacheAsync(HiveConf conf) { @Override public void setConf(Configuration conf) { - String rawStoreClassName = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL, + String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { - rawStore = ((Class) MetaStoreUtils.getClass( - rawStoreClassName)).newInstance(); + rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance(); } catch (Exception e) { throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e); } @@ -245,17 +244,17 @@ static void prewarm(RawStore rawStore) throws Exception { SharedCache sharedCache = sharedCacheWrapper.getUnsafe(); for (String dbName : dbNames) { Database db = rawStore.getDatabase(dbName); - sharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(dbName), db); + sharedCache.addDatabaseToCache(StringUtils.normalizeIdentifier(dbName), db); List tblNames = rawStore.getAllTables(dbName); for (String tblName : tblNames) { Table table = rawStore.getTable(dbName, tblName); - sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), table); + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), table); Deadline.startTimer("getPartitions"); List partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); for (Partition partition : partitions) { - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tblName), partition); } // Cache partition column stats @@ -273,8 +272,8 @@ static void prewarm(RawStore rawStore) throws Exception { rawStore.getTableColumnStatistics(dbName, tblName, colNames); Deadline.stopTimer(); if ((tableColStats != null) && (tableColStats.getStatsObjSize() > 0)) { - 
sharedCache.addTableColStatsToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); + sharedCache.addTableColStatsToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } } @@ -292,10 +291,9 @@ public Thread newThread(Runnable r) { return t; } }); - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { + if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { cacheRefreshPeriod = - HiveConf.getTimeVar(conf, - HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, + MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); } LOG.info("CachedStore: starting cache update service (run every " + cacheRefreshPeriod + "ms"); @@ -397,7 +395,7 @@ public void update() { private void updateDatabases(RawStore rawStore, List dbNames) { // Prepare the list of databases - List databases = new ArrayList(); + List databases = new ArrayList<>(); for (String dbName : dbNames) { Database db; try { @@ -426,13 +424,13 @@ private void updateDatabases(RawStore rawStore, List dbNames) { // Update the cached table objects private void updateTables(RawStore rawStore, String dbName) { - List
<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); try { List tblNames = rawStore.getAllTables(dbName); for (String tblName : tblNames) { Table table = - rawStore.getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + rawStore.getTable(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); tables.add(table); } if (tableCacheLock.writeLock().tryLock()) { @@ -465,8 +463,8 @@ private void updateTablePartitions(RawStore rawStore, String dbName, String tblN return; } sharedCacheWrapper.getUnsafe().refreshPartitions( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partitions); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partitions); } } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); @@ -495,8 +493,8 @@ private void updateTableColStats(RawStore rawStore, String dbName, String tblNam return; } sharedCacheWrapper.getUnsafe().refreshTableColStats( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } } catch (MetaException | NoSuchObjectException e) { @@ -524,8 +522,8 @@ private void updateTablePartitionColStats(RawStore rawStore, String dbName, Stri return; } sharedCacheWrapper.getUnsafe().refreshPartitionColStats( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), colStatsPerPartition); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), colStatsPerPartition); } } } catch (MetaException | NoSuchObjectException e) { @@ -578,7 +576,7 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(db.getName()), + sharedCache.addDatabaseToCache(StringUtils.normalizeIdentifier(db.getName()), db.deepCopy()); } finally { databaseCacheLock.readLock().unlock(); @@ -596,7 +594,7 @@ public Database getDatabase(String dbName) throws NoSuchObjectException { } catch (MetaException e) { throw new RuntimeException(e); // TODO: why doesn't getDatabase throw MetaEx? 
} - Database db = sharedCache.getDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName)); + Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); if (db == null) { throw new NoSuchObjectException(); } @@ -613,7 +611,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbname)); + sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbname)); } finally { databaseCacheLock.readLock().unlock(); } @@ -632,7 +630,7 @@ public boolean alterDatabase(String dbName, Database db) throws NoSuchObjectExce // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.alterDatabaseInCache(HiveStringUtils.normalizeIdentifier(dbName), db); + sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(dbName), db); } finally { databaseCacheLock.readLock().unlock(); } @@ -648,7 +646,7 @@ public List getDatabases(String pattern) throws MetaException { } List results = new ArrayList(); for (String dbName : sharedCache.listCachedDatabases()) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = StringUtils.normalizeIdentifier(dbName); if (CacheUtils.matches(dbName, pattern)) { results.add(dbName); } @@ -708,8 +706,8 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException // Wait if background cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()), - HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()), + StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); } finally { tableCacheLock.readLock().unlock(); } @@ -727,8 +725,8 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, // Wait if background table cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.removeTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + sharedCache.removeTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); } finally { tableCacheLock.readLock().unlock(); } @@ -737,8 +735,8 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, // Wait if background table col stats cache update is happening tableColStatsCacheLock.readLock().lock(); isTableColStatsCacheDirty.set(true); - sharedCache.removeTableColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + sharedCache.removeTableColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); } finally { tableColStatsCacheLock.readLock().unlock(); } @@ -752,8 +750,8 @@ public Table getTable(String dbName, String tableName) throws MetaException { if (sharedCache == null) { return rawStore.getTable(dbName, tableName); } - Table tbl = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + Table tbl = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); if 
(tbl != null) { tbl.unsetPrivileges(); tbl.setRewriteEnabled(tbl.isRewriteEnabled()); @@ -771,8 +769,8 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(part.getDbName()), - HiveStringUtils.normalizeIdentifier(part.getTableName()), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(part.getDbName()), + StringUtils.normalizeIdentifier(part.getTableName()), part); } finally { partitionCacheLock.readLock().unlock(); } @@ -792,8 +790,8 @@ public boolean addPartitions(String dbName, String tblName, List part partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); for (Partition part : parts) { - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), part); } } finally { partitionCacheLock.readLock().unlock(); @@ -816,8 +814,8 @@ public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy p PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), part); } } finally { partitionCacheLock.readLock().unlock(); @@ -834,8 +832,8 @@ public Partition getPartition(String dbName, String tableName, List part return rawStore.getPartition(dbName, tableName, part_vals); } Partition part = - sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); if (part != null) { part.unsetPrivileges(); } else { @@ -851,8 +849,8 @@ public boolean doesPartitionExist(String dbName, String tableName, if (sharedCache == null) { return rawStore.doesPartitionExist(dbName, tableName, part_vals); } - return sharedCache.existPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + return sharedCache.existPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } @Override @@ -1172,7 +1170,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); } return expressionProxy.filterPartitionsByExpr( table.getPartitionKeys(), expr, defaultPartName, result); @@ -1240,7 +1238,7 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) private static List partNameToVals(String name) { if (name == null) return null; - List vals = new ArrayList(); + List vals = new ArrayList<>(); String[] kvp = name.split("/"); for (String kv : kvp) { 
vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1))); @@ -1592,7 +1590,7 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName return rawStore.getTableColumnStatistics(dbName, tableName, colNames); } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); for (String colName : colNames) { String colStatsCacheKey = CacheUtils.buildKey(HiveStringUtils.normalizeIdentifier(dbName), diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index e713de051d0c..043ddddeea84 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Map.Entry; import java.util.TreeMap; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -41,7 +40,8 @@ import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,7 +72,7 @@ public synchronized Database getDatabaseFromCache(String name) { public synchronized void addDatabaseToCache(String dbName, Database db) { Database dbCopy = db.deepCopy(); - dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbName)); + dbCopy.setName(StringUtils.normalizeIdentifier(dbName)); databaseCache.put(dbName, dbCopy); } @@ -104,11 +104,11 @@ public synchronized Table getTableFromCache(String dbName, String tableName) { public synchronized void addTableToCache(String dbName, String tblName, Table tbl) { Table tblCopy = tbl.deepCopy(); - tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(dbName)); - tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblName)); + tblCopy.setDbName(StringUtils.normalizeIdentifier(dbName)); + tblCopy.setTableName(StringUtils.normalizeIdentifier(tblName)); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { - fs.setName(HiveStringUtils.normalizeIdentifier(fs.getName())); + fs.setName(StringUtils.normalizeIdentifier(fs.getName())); } } TableWrapper wrapper; @@ -174,8 +174,8 @@ public synchronized void updateTableColStatsInCache(String dbName, String tableN public synchronized void alterTableInCache(String dbName, String tblName, Table newTable) { removeTableFromCache(dbName, tblName); - addTableToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()), - 
HiveStringUtils.normalizeIdentifier(newTable.getTableName()), newTable); + addTableToCache(StringUtils.normalizeIdentifier(newTable.getDbName()), + StringUtils.normalizeIdentifier(newTable.getTableName()), newTable); } public synchronized void alterTableInPartitionCache(String dbName, String tblName, @@ -184,10 +184,10 @@ public synchronized void alterTableInPartitionCache(String dbName, String tblNam List partitions = listCachedPartitions(dbName, tblName, -1); for (Partition part : partitions) { removePartitionFromCache(part.getDbName(), part.getTableName(), part.getValues()); - part.setDbName(HiveStringUtils.normalizeIdentifier(newTable.getDbName())); - part.setTableName(HiveStringUtils.normalizeIdentifier(newTable.getTableName())); - addPartitionToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()), - HiveStringUtils.normalizeIdentifier(newTable.getTableName()), part); + part.setDbName(StringUtils.normalizeIdentifier(newTable.getDbName())); + part.setTableName(StringUtils.normalizeIdentifier(newTable.getTableName())); + addPartitionToCache(StringUtils.normalizeIdentifier(newTable.getDbName()), + StringUtils.normalizeIdentifier(newTable.getTableName()), part); } } } @@ -199,7 +199,7 @@ public synchronized void alterTableInTableColStatsCache(String dbName, String tb Iterator> iterator = tableColStatsCache.entrySet().iterator(); Map newTableColStats = - new HashMap(); + new HashMap<>(); while (iterator.hasNext()) { Entry entry = iterator.next(); String key = entry.getKey(); @@ -219,8 +219,7 @@ public synchronized void alterTableInPartitionColStatsCache(String dbName, Strin Table newTable) { if (!dbName.equals(newTable.getDbName()) || !tblName.equals(newTable.getTableName())) { List partitions = listCachedPartitions(dbName, tblName, -1); - Map newPartitionColStats = - new HashMap(); + Map newPartitionColStats = new HashMap<>(); for (Partition part : partitions) { String oldPartialPartitionKey = CacheUtils.buildKeyWithDelimit(dbName, tblName, part.getValues()); @@ -371,15 +370,14 @@ public synchronized List listCachedPartitions(String dbName, String t public synchronized void alterPartitionInCache(String dbName, String tblName, List partVals, Partition newPart) { removePartitionFromCache(dbName, tblName, partVals); - addPartitionToCache(HiveStringUtils.normalizeIdentifier(newPart.getDbName()), - HiveStringUtils.normalizeIdentifier(newPart.getTableName()), newPart); + addPartitionToCache(StringUtils.normalizeIdentifier(newPart.getDbName()), + StringUtils.normalizeIdentifier(newPart.getTableName()), newPart); } public synchronized void alterPartitionInColStatsCache(String dbName, String tblName, List partVals, Partition newPart) { String oldPartialPartitionKey = CacheUtils.buildKeyWithDelimit(dbName, tblName, partVals); - Map newPartitionColStats = - new HashMap(); + Map newPartitionColStats = new HashMap<>(); Iterator> iterator = partitionColStatsCache.entrySet().iterator(); while (iterator.hasNext()) { @@ -389,8 +387,8 @@ public synchronized void alterPartitionInColStatsCache(String dbName, String tbl if (key.toLowerCase().startsWith(oldPartialPartitionKey.toLowerCase())) { Object[] decomposedKey = CacheUtils.splitPartitionColStats(key); String newKey = - CacheUtils.buildKey(HiveStringUtils.normalizeIdentifier(newPart.getDbName()), - HiveStringUtils.normalizeIdentifier(newPart.getTableName()), newPart.getValues(), + CacheUtils.buildKey(StringUtils.normalizeIdentifier(newPart.getDbName()), + StringUtils.normalizeIdentifier(newPart.getTableName()), newPart.getValues(), (String) 
decomposedKey[3]); newPartitionColStats.put(newKey, colStatObj); iterator.remove(); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 5a6ef990ee5e..15165dabd3f1 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -591,6 +591,8 @@ public enum ConfVars { "Metastore SSL certificate truststore password."), STATS_AUTO_GATHER("metastore.stats.autogather", "hive.stats.autogather", true, "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."), + STATS_FETCH_BITVECTOR("metastore.stats.fetch.bitvector", "hive.stats.fetch.bitvector", false, + "Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"), STATS_NDV_TUNER("metastore.stats.ndv.tuner", "hive.metastore.stats.ndv.tuner", 0.0, "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" + "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" + diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 074c067dad92..77790adfff3c 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -21,29 +21,46 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.commons.lang.*; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; import java.math.BigDecimal; import java.math.BigInteger; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import 
java.util.concurrent.Future; +import java.util.regex.Pattern; public class MetaStoreUtils { /** A fixed date format to be used for hive partition column values. */ @@ -56,6 +73,7 @@ protected DateFormat initialValue() { return val; } }; + private static final Charset ENCODING = StandardCharsets.UTF_8; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); /** @@ -216,4 +234,129 @@ public ColumnStatisticsObj call() throws Exception { public static double decimalToDouble(Decimal decimal) { return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); } + + public static String[] getQualifiedName(String defaultDbName, String tableName) { + String[] names = tableName.split("\\."); + if (names.length == 1) { + return new String[] { defaultDbName, tableName}; + } + return names; + } + + public static void validatePartitionNameCharacters(List partVals, + Pattern partitionValidationPattern) throws MetaException { + + String invalidPartitionVal = getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern); + if (invalidPartitionVal != null) { + throw new MetaException("Partition value '" + invalidPartitionVal + + "' contains a character " + "not matched by whitelist pattern '" + + partitionValidationPattern.toString() + "'. " + "(configure with " + + MetastoreConf.ConfVars.PARTITION_NAME_WHITELIST_PATTERN.varname + ")"); + } + } + + public static String getPartitionValWithInvalidCharacter(List partVals, + Pattern partitionValidationPattern) { + if (partitionValidationPattern == null) { + return null; + } + + for (String partVal : partVals) { + if (!partitionValidationPattern.matcher(partVal).matches()) { + return partVal; + } + } + + return null; + } + + /** + * Produce a hash for the storage descriptor + * @param sd storage descriptor to hash + * @param md message descriptor to use to generate the hash + * @return the hash as a byte array + */ + public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { + // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different + // results for hashes based on the OS or JVM being used. + md.reset(); + for (FieldSchema fs : sd.getCols()) { + md.update(fs.getName().getBytes(ENCODING)); + md.update(fs.getType().getBytes(ENCODING)); + if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); + } + if (sd.getInputFormat() != null) { + md.update(sd.getInputFormat().getBytes(ENCODING)); + } + if (sd.getOutputFormat() != null) { + md.update(sd.getOutputFormat().getBytes(ENCODING)); + } + md.update(sd.isCompressed() ? 
"true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); + if (sd.getSerdeInfo() != null) { + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + md.update(serde.getName().getBytes(ENCODING)); + } + if (serde.getSerializationLib() != null) { + md.update(serde.getSerializationLib().getBytes(ENCODING)); + } + if (serde.getParameters() != null) { + SortedMap params = new TreeMap<>(serde.getParameters()); + for (Map.Entry param : params.entrySet()) { + md.update(param.getKey().getBytes(ENCODING)); + md.update(param.getValue().getBytes(ENCODING)); + } + } + } + if (sd.getBucketCols() != null) { + List bucketCols = new ArrayList<>(sd.getBucketCols()); + for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); + } + if (sd.getSortCols() != null) { + SortedSet orders = new TreeSet<>(sd.getSortCols()); + for (Order order : orders) { + md.update(order.getCol().getBytes(ENCODING)); + md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); + } + } + if (sd.getSkewedInfo() != null) { + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); + for (String colname : colnames) md.update(colname.getBytes(ENCODING)); + } + if (skewed.getSkewedColValues() != null) { + SortedSet sortedOuterList = new TreeSet<>(); + for (List innerList : skewed.getSkewedColValues()) { + SortedSet sortedInnerList = new TreeSet<>(innerList); + sortedOuterList.add(org.apache.commons.lang.StringUtils.join(sortedInnerList, ".")); + } + for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); + } + if (skewed.getSkewedColValueLocationMaps() != null) { + SortedMap sortedMap = new TreeMap<>(); + for (Map.Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { + SortedSet sortedKey = new TreeSet<>(smap.getKey()); + sortedMap.put(org.apache.commons.lang.StringUtils.join(sortedKey, "."), smap.getValue()); + } + for (Map.Entry e : sortedMap.entrySet()) { + md.update(e.getKey().getBytes(ENCODING)); + md.update(e.getValue().getBytes(ENCODING)); + } + } + md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + } + + return md.digest(); + } + + public static List getColumnNamesForTable(Table table) { + List colNames = new ArrayList<>(); + Iterator colsIterator = table.getSd().getColsIterator(); + while (colsIterator.hasNext()) { + colNames.add(colsIterator.next().getName()); + } + return colNames; + } } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java new file mode 100644 index 000000000000..5b49a251f843 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.utils; + + + +public class ObjectPair { + private F first; + private S second; + + public ObjectPair() {} + + /** + * Creates a pair. Constructor doesn't infer template args but + * the method does, so the code becomes less ugly. + */ + public static ObjectPair create(T1 f, T2 s) { + return new ObjectPair<>(f, s); + } + + public ObjectPair(F first, S second) { + this.first = first; + this.second = second; + } + + public F getFirst() { + return first; + } + + public void setFirst(F first) { + this.first = first; + } + + public S getSecond() { + return second; + } + + public void setSecond(S second) { + this.second = second; + } + + @Override + public boolean equals(Object that) { + if (that == null) { + return false; + } + if (that instanceof ObjectPair) { + return this.equals((ObjectPair)that); + } + return false; + } + + public boolean equals(ObjectPair that) { + if (that == null) { + return false; + } + + return this.getFirst().equals(that.getFirst()) && + this.getSecond().equals(that.getSecond()); + } + + @Override + public int hashCode() { + return first.hashCode() * 31 + second.hashCode(); + } + + public String toString() { + return first + ":" + second; + } +} diff --git a/common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java index 792b8626dad1..883e2bdcdbe2 100644 --- a/common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java similarity index 94% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 64b2c704b3d6..35a4aade4f30 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -1,27 +1,27 @@ /* - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. 
You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hive.metastore; import com.codahale.metrics.Counter; import com.google.common.collect.ImmutableList; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -43,7 +43,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.model.MNotificationLog; @@ -119,8 +119,9 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName()); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); objectStore = new ObjectStore(); objectStore.setConf(conf); @@ -134,6 +135,8 @@ public void tearDown() { /** * Test notification operations */ + // TODO MS-SPLIT uncomment once we move EventMessage over + /* @Test public void testNotificationOps() throws InterruptedException { final int NO_EVENT_ID = 0; @@ -180,6 +183,7 @@ public void testNotificationOps() throws InterruptedException { eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); Assert.assertEquals(0, eventResponse.getEventsSize()); } + */ /** * Test database operations @@ -368,8 +372,8 @@ public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchOb @Test public void testDirectSqlErrorMetrics() throws Exception { - HiveConf conf = new HiveConf(); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); + 
Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true); Metrics.initialize(conf); conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java similarity index 87% rename from metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index b6d2df59cdca..d486f7c7a93d 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy; @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -56,9 +57,9 @@ public class TestCachedStore { @Before public void setUp() throws Exception { - HiveConf conf = new HiveConf(); - conf.setBoolean(HiveConf.ConfVars.HIVE_IN_TEST.varname, true); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName()); objectStore = new ObjectStore(); objectStore.setConf(conf); @@ -88,7 +89,7 @@ public void testDatabaseOps() throws Exception { String dbName = "testDatabaseOps"; String dbDescription = "testDatabaseOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -179,7 +180,7 @@ public void testTableOps() throws Exception { String dbName = "testTableOps"; String dbDescription = "testTableOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -193,18 +194,18 @@ public void testTableOps() throws Exception { String serdeLocation = "file:/tmp"; FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); - List cols = new ArrayList(); + List cols = new ArrayList<>(); 
cols.add(col1); cols.add(col2); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); - SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap()); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); + SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>()); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); sd.setStoredAsSubDirectories(false); Table tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, tblName); @@ -221,7 +222,7 @@ public void testTableOps() throws Exception { // Add a new table via CachedStore String tblName1 = "tbl1"; Table tbl1 = - new Table(tblName1, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName1, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl1); tbl1 = cachedStore.getTable(dbName, tblName1); @@ -233,7 +234,7 @@ public void testTableOps() throws Exception { // Add a new table via ObjectStore String tblName2 = "tbl2"; Table tbl2 = - new Table(tblName2, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName2, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl2); tbl2 = objectStore.getTable(dbName, tblName2); @@ -241,7 +242,7 @@ public void testTableOps() throws Exception { // Alter table "tbl" via ObjectStore tblOwner = "user2"; tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.alterTable(dbName, tblName, tbl); tbl = objectStore.getTable(dbName, tblName); @@ -283,7 +284,7 @@ public void testPartitionOps() throws Exception { String dbName = "testPartitionOps"; String dbDescription = "testPartitionOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -297,17 +298,17 @@ public void testPartitionOps() throws Exception { String serdeLocation = "file:/tmp"; FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(col1); cols.add(col2); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column"); - List ptnCols = new ArrayList(); + List ptnCols = new ArrayList<>(); ptnCols.add(ptnCol1); Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null, @@ -315,7 +316,7 @@ public void testPartitionOps() throws Exception { objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, 
tblName); final String ptnColVal1 = "aaa"; - Map partParams = new HashMap(); + Map partParams = new HashMap<>(); Partition ptn1 = new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, sd, partParams); objectStore.addPartition(ptn1); @@ -384,7 +385,7 @@ public void testTableColStatsOps() throws Exception { String dbName = "testTableColStatsOps"; String dbDescription = "testTableColStatsOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -413,18 +414,18 @@ public void testTableColStatsOps() throws Exception { long col3NumTrues = 100; long col3NumFalses = 30; long col3Nulls = 10; - final List cols = new ArrayList(); + final List cols = new ArrayList<>(); cols.add(col1); cols.add(col2); cols.add(col3); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); final SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); Table tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, tblName); @@ -432,7 +433,7 @@ public void testTableColStatsOps() throws Exception { // Add ColumnStatistics for tbl to metastore DB via ObjectStore ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); // Col1 ColumnStatisticsData data1 = new ColumnStatisticsData(); @@ -530,53 +531,53 @@ public void testSharedStoreDb() { public void testSharedStoreTable() { Table tbl1 = new Table(); StorageDescriptor sd1 = new StorageDescriptor(); - List cols1 = new ArrayList(); + List cols1 = new ArrayList<>(); cols1.add(new FieldSchema("col1", "int", "")); - Map params1 = new HashMap(); + Map params1 = new HashMap<>(); params1.put("key", "value"); sd1.setCols(cols1); sd1.setParameters(params1); sd1.setLocation("loc1"); tbl1.setSd(sd1); - tbl1.setPartitionKeys(new ArrayList()); + tbl1.setPartitionKeys(new ArrayList<>()); Table tbl2 = new Table(); StorageDescriptor sd2 = new StorageDescriptor(); - List cols2 = new ArrayList(); + List cols2 = new ArrayList<>(); cols2.add(new FieldSchema("col1", "int", "")); - Map params2 = new HashMap(); + Map params2 = new HashMap<>(); params2.put("key", "value"); sd2.setCols(cols2); sd2.setParameters(params2); sd2.setLocation("loc2"); tbl2.setSd(sd2); - tbl2.setPartitionKeys(new ArrayList()); + tbl2.setPartitionKeys(new ArrayList<>()); Table tbl3 = new Table(); StorageDescriptor sd3 = new StorageDescriptor(); - List cols3 = new ArrayList(); + List cols3 = new ArrayList<>(); cols3.add(new FieldSchema("col3", "int", "")); - Map params3 = new HashMap(); + Map params3 = new HashMap<>(); params3.put("key2", "value2"); sd3.setCols(cols3); sd3.setParameters(params3); sd3.setLocation("loc3"); tbl3.setSd(sd3); - tbl3.setPartitionKeys(new ArrayList()); + tbl3.setPartitionKeys(new ArrayList<>()); Table newTbl1 = new Table(); newTbl1.setDbName("db2"); newTbl1.setTableName("tbl1"); StorageDescriptor newSd1 = new StorageDescriptor(); 
- List newCols1 = new ArrayList(); + List newCols1 = new ArrayList<>(); newCols1.add(new FieldSchema("newcol1", "int", "")); - Map newParams1 = new HashMap(); + Map newParams1 = new HashMap<>(); newParams1.put("key", "value"); newSd1.setCols(newCols1); newSd1.setParameters(params1); newSd1.setLocation("loc1"); newTbl1.setSd(newSd1); - newTbl1.setPartitionKeys(new ArrayList()); + newTbl1.setPartitionKeys(new ArrayList<>()); sharedCache.addTableToCache("db1", "tbl1", tbl1); sharedCache.addTableToCache("db1", "tbl2", tbl2); @@ -607,9 +608,9 @@ public void testSharedStoreTable() { public void testSharedStorePartition() { Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); - List cols1 = new ArrayList(); + List cols1 = new ArrayList<>(); cols1.add(new FieldSchema("col1", "int", "")); - Map params1 = new HashMap(); + Map params1 = new HashMap<>(); params1.put("key", "value"); sd1.setCols(cols1); sd1.setParameters(params1); @@ -619,9 +620,9 @@ public void testSharedStorePartition() { Partition part2 = new Partition(); StorageDescriptor sd2 = new StorageDescriptor(); - List cols2 = new ArrayList(); + List cols2 = new ArrayList<>(); cols2.add(new FieldSchema("col1", "int", "")); - Map params2 = new HashMap(); + Map params2 = new HashMap<>(); params2.put("key", "value"); sd2.setCols(cols2); sd2.setParameters(params2); @@ -631,9 +632,9 @@ public void testSharedStorePartition() { Partition part3 = new Partition(); StorageDescriptor sd3 = new StorageDescriptor(); - List cols3 = new ArrayList(); + List cols3 = new ArrayList<>(); cols3.add(new FieldSchema("col3", "int", "")); - Map params3 = new HashMap(); + Map params3 = new HashMap<>(); params3.put("key2", "value2"); sd3.setCols(cols3); sd3.setParameters(params3); @@ -645,9 +646,9 @@ public void testSharedStorePartition() { newPart1.setDbName("db1"); newPart1.setTableName("tbl1"); StorageDescriptor newSd1 = new StorageDescriptor(); - List newCols1 = new ArrayList(); + List newCols1 = new ArrayList<>(); newCols1.add(new FieldSchema("newcol1", "int", "")); - Map newParams1 = new HashMap(); + Map newParams1 = new HashMap<>(); newParams1.put("key", "value"); newSd1.setCols(newCols1); newSd1.setParameters(params1); @@ -688,35 +689,35 @@ public void testAggrStatsRepeatedRead() throws Exception { Database db = new Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new 
Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -734,9 +735,9 @@ public void testAggrStatsRepeatedRead() throws Exception { cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1); cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); @@ -754,35 +755,35 @@ public void testPartitionAggrStats() throws Exception { Database db = new Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -802,9 +803,9 @@ public void testPartitionAggrStats() throws Exception { longStats.setNumDVs(40); cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); @@ -824,35 +825,35 @@ public void testPartitionAggrStatsBitVector() throws Exception { Database db = new 
Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -886,9 +887,9 @@ public void testPartitionAggrStatsBitVector() throws Exception { cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); From bd820149b18ef7157e721e24935e3d76c27f59fd Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Mon, 28 Aug 2017 15:16:02 -0700 Subject: [PATCH 08/13] Moved RawStoreProxy and associated test. 
--- .../hadoop/hive/metastore/RawStoreProxy.java | 26 ++++++++----------- .../hive/metastore/TestRawStoreProxy.java | 10 +++---- 2 files changed, 16 insertions(+), 20 deletions(-) rename {metastore/src => standalone-metastore/src/main}/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java (83%) rename {metastore/src/test => standalone-metastore/src/test/java}/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java (87%) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java similarity index 83% rename from metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java index c5e117d8a909..2fd22683ef81 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,12 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.lang.ClassUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.Private @@ -41,16 +42,16 @@ public class RawStoreProxy implements InvocationHandler { private final RawStore base; private final MetaStoreInit.MetaStoreInitData metaStoreInitData = new MetaStoreInit.MetaStoreInitData(); - private final HiveConf hiveConf; + private final Configuration hiveConf; private final Configuration conf; // thread local conf from HMS private final long socketTimeout; - protected RawStoreProxy(HiveConf hiveConf, Configuration conf, + protected RawStoreProxy(Configuration hiveConf, Configuration conf, Class rawStoreClass, int id) throws MetaException { this.conf = conf; this.hiveConf = hiveConf; - this.socketTimeout = HiveConf.getTimeVar(hiveConf, - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); + this.socketTimeout = MetastoreConf.getTimeVar(hiveConf, + MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); // This has to be called before initializing the instance of RawStore init(); @@ -58,11 +59,10 @@ protected RawStoreProxy(HiveConf hiveConf, Configuration conf, this.base = ReflectionUtils.newInstance(rawStoreClass, conf); } - public static RawStore getProxy(HiveConf hiveConf, Configuration conf, String rawStoreClassName, + public static RawStore getProxy(Configuration hiveConf, Configuration conf, String rawStoreClassName, int id) throws MetaException { - Class baseClass = (Class) MetaStoreUtils.getClass( - rawStoreClassName); + Class baseClass = JavaUtils.getClass(rawStoreClassName, RawStore.class); RawStoreProxy handler = new RawStoreProxy(hiveConf, conf, baseClass, id); @@ -88,10 +88,6 @@ private void init() throws MetaException { 
MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData); } - private void initMS() { - base.setConf(getConf()); - } - @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { try { diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java similarity index 87% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java index 68d65a86a845..25a6ecf3a144 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,8 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; public class TestRawStoreProxy { @@ -47,10 +47,10 @@ public void exceptions() throws IllegalStateException, MetaException { @Test public void testExceptionDispatch() throws Throwable { - HiveConf hiveConf = new HiveConf(); - hiveConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 10, + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, 10, TimeUnit.MILLISECONDS); - RawStoreProxy rsp = new RawStoreProxy(hiveConf, hiveConf, TestStore.class, 1); + RawStoreProxy rsp = new RawStoreProxy(conf, conf, TestStore.class, 1); try { rsp.invoke(null, TestStore.class.getMethod("exceptions"), new Object[] {}); fail("an exception is expected"); From fd985d15c76e65b913680cc5194867658acd4bc2 Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Fri, 6 Oct 2017 14:51:23 -0700 Subject: [PATCH 09/13] Changes to make it compile after rebase. Also split TestObjectStore so that I could move the majority of it to standalone-metastore (since ObjectStore is now there) but left a couple of tests in metastore since they reference EventMessage which hasn't moved yet. --- .../hive/metastore/TestObjectStore2.java | 229 ++++++++++++++++++ .../hadoop/hive/metastore/ObjectStore.java | 17 +- .../hive/metastore/conf/MetastoreConf.java | 21 +- .../hive/metastore/TestObjectStore.java | 208 +++------------- 4 files changed, 282 insertions(+), 193 deletions(-) create mode 100644 metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java new file mode 100644 index 000000000000..fa4e02ac79f9 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.model.MNotificationLog; +import org.apache.hadoop.hive.metastore.model.MNotificationNextId; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hive.metastore.TestOldSchema.dropAllStoreObjects; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +// Tests from TestObjectStore that can't be moved yet due to references to EventMessage. Once +// EventMessage has been moved this should be recombined with TestObjectStore. 
+ +public class TestObjectStore2 { + private ObjectStore objectStore = null; + + public static class MockPartitionExpressionProxy implements PartitionExpressionProxy { + @Override + public String convertExprToFilter(byte[] expr) throws MetaException { + return null; + } + + @Override + public boolean filterPartitionsByExpr(List partColumns, + byte[] expr, String defaultPartitionName, List partitionNames) + throws MetaException { + return false; + } + + @Override + public FileMetadataExprType getMetadataType(String inputFormat) { + return null; + } + + @Override + public SearchArgument createSarg(byte[] expr) { + return null; + } + + @Override + public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { + return null; + } + } + + @Before + public void setUp() throws Exception { + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); + + objectStore = new ObjectStore(); + objectStore.setConf(conf); + dropAllStoreObjects(objectStore); + } + + /** + * Test notification operations + */ + // TODO MS-SPLIT uncomment once we move EventMessage over + @Test + public void testNotificationOps() throws InterruptedException { + final int NO_EVENT_ID = 0; + final int FIRST_EVENT_ID = 1; + final int SECOND_EVENT_ID = 2; + + NotificationEvent event = + new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), ""); + NotificationEventResponse eventResponse; + CurrentNotificationEventId eventId; + + // Verify that there is no notifications available yet + eventId = objectStore.getCurrentNotificationEventId(); + assertEquals(NO_EVENT_ID, eventId.getEventId()); + + // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID + objectStore.addNotificationEvent(event); + assertEquals(FIRST_EVENT_ID, event.getEventId()); + objectStore.addNotificationEvent(event); + assertEquals(SECOND_EVENT_ID, event.getEventId()); + + // Verify that objectStore fetches the latest notification event ID + eventId = objectStore.getCurrentNotificationEventId(); + assertEquals(SECOND_EVENT_ID, eventId.getEventId()); + + // Verify that getNextNotification() returns all events + eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); + assertEquals(2, eventResponse.getEventsSize()); + assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); + assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); + + // Verify that getNextNotification(last) returns events after a specified event + eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); + assertEquals(1, eventResponse.getEventsSize()); + assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); + + // Verify that getNextNotification(last) returns zero events if there are no more notifications available + eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); + assertEquals(0, eventResponse.getEventsSize()); + + // Verify that cleanNotificationEvents() cleans up all old notifications + Thread.sleep(1); + objectStore.cleanNotificationEvents(1); + eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); + assertEquals(0, eventResponse.getEventsSize()); + } + + @Ignore( + "This test is here to allow testing with other databases like mysql / postgres etc\n" + + " with user changes to the code. 
This cannot be run on apache derby because of\n" + + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html" + ) + @Test + public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException { + + final int NUM_THREADS = 10; + CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS, + () -> LoggerFactory.getLogger("test") + .debug(NUM_THREADS + " threads going to add notification")); + + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); + /* + Below are the properties that need to be set based on what database this test is going to be run + */ + +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); +// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, +// "jdbc:mysql://localhost:3306/metastore_db"); +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); +// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); + + /* + we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL + and we don't run the schema creation sql that includes the an insert for notification_sequence + which can be locked. the entry in notification_sequence happens via notification_event insertion. + */ + objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute(); + objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute(); + + objectStore.addNotificationEvent( + new NotificationEvent(0, 0, + EventMessage.EventType.CREATE_DATABASE.toString(), + "CREATE DATABASE DB initial")); + + ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS); + for (int i = 0; i < NUM_THREADS; i++) { + final int n = i; + + executorService.execute( + () -> { + ObjectStore store = new ObjectStore(); + store.setConf(conf); + + String eventType = EventMessage.EventType.CREATE_DATABASE.toString(); + NotificationEvent dbEvent = + new NotificationEvent(0, 0, eventType, + "CREATE DATABASE DB" + n); + System.out.println("ADDING NOTIFICATION"); + + try { + cyclicBarrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new RuntimeException(e); + } + store.addNotificationEvent(dbEvent); + System.out.println("FINISH NOTIFICATION"); + }); + } + executorService.shutdown(); + assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); + + // we have to setup this again as the underlying PMF keeps getting reinitialized with original + // reference closed + ObjectStore store = new ObjectStore(); + store.setConf(conf); + + NotificationEventResponse eventResponse = store.getNextNotification( + new NotificationEventRequest()); + assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize()); + long previousId = 0; + for (NotificationEvent event : eventResponse.getEvents()) { + assertTrue("previous:" + previousId + " current:" + event.getEventId(), + previousId < event.getEventId()); + assertTrue(previousId + 1 == event.getEventId()); + previousId = event.getEventId(); + } + } +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 2f05388a8220..c04f5bffbc78 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -433,9 +433,7 @@ private void initializeHelper(Properties dsProps) { pm = getPersistenceManager(); try { String productName = MetaStoreDirectSql.getProductName(pm); - sqlGenerator = new SQLGenerator( - DatabaseProduct.determineDatabaseProduct(productName), - new HiveConf(hiveConf, ObjectStore.class)); + sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf); } catch (SQLException e) { LOG.error("error trying to figure out the database product", e); throw new RuntimeException(e); @@ -8516,7 +8514,7 @@ public NotificationEventResponse getNextNotification(NotificationEventRequest rq private void lockForUpdate() throws MetaException { String selectQuery = "select \"NEXT_EVENT_ID\" from \"NOTIFICATION_SEQUENCE\""; String selectForUpdateQuery = sqlGenerator.addForUpdateClause(selectQuery); - new RetryingExecutor(hiveConf, () -> { + new RetryingExecutor(conf, () -> { Query query = pm.newQuery("javax.jdo.query.SQL", selectForUpdateQuery); query.setUnique(true); // only need to execute it to get db Lock @@ -8536,13 +8534,10 @@ interface Command { private final Command command; RetryingExecutor(Configuration config, Command command) { - this.maxRetries = config.getInt(ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name(), - ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.defaultIntVal); - this.sleepInterval = config.getTimeDuration( - ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL.name(), - ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL.defaultLongVal, - TimeUnit.MILLISECONDS - ); + this.maxRetries = + MetastoreConf.getIntVar(config, ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES); + this.sleepInterval = MetastoreConf.getTimeVar(config, + ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); this.command = command; } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 15165dabd3f1..5933318e64b2 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -499,12 +499,6 @@ public enum ConfVars { "A comma separated list of metrics reporters to start"), MULTITHREADED("javax.jdo.option.Multithreaded", "javax.jdo.option.Multithreaded", true, "Set this to true if multiple threads access metastore through JDO concurrently."), - ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings", - "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, - "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + - "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " + - "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " + - "pruning is the correct behaviour"), MAX_OPEN_TXNS("metastore.max.open.txns", "hive.max.open.txns", 100000, "Maximum number of open transactions. 
If \n" + "current open transactions reach this limit, future open transaction requests will be \n" + @@ -512,6 +506,21 @@ public enum ConfVars { NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", "javax.jdo.option.NonTransactionalRead", true, "Reads outside of transactions"), + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("metastore.notification.sequence.lock.max.retries", + "hive.notification.sequence.lock.max.retries", 5, + "Number of retries required to acquire a lock when getting the next notification sequential ID for entries " + + "in the NOTIFICATION_LOG table."), + NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL( + "metastore.notification.sequence.lock.retry.sleep.interval", + "hive.notification.sequence.lock.retry.sleep.interval", 500, TimeUnit.MILLISECONDS, + "Sleep interval between retries to acquire a notification lock as described part of property " + + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()), + ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings", + "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, + "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + + "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " + + "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " + + "pruning is the correct behaviour"), PARTITION_NAME_WHITELIST_PATTERN("metastore.partition.name.whitelist.pattern", "hive.metastore.partition.name.whitelist.pattern", "", "Partition names will be checked against this regex pattern and rejected if not matched."), diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 35a4aade4f30..85ea3f280293 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableList; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -32,9 +31,6 @@ import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.Role; @@ -46,13 +42,9 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.model.MNotificationLog; -import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.junit.After; import org.junit.Assert; import 
org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import org.mockito.Mockito; import org.slf4j.Logger; @@ -64,15 +56,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; public class TestObjectStore { private ObjectStore objectStore = null; @@ -96,7 +79,8 @@ public String convertExprToFilter(byte[] expr) throws MetaException { @Override public boolean filterPartitionsByExpr(List partColumns, - byte[] expr, String defaultPartitionName, List partitionNames) + byte[] expr, String defaultPartitionName, + List partitionNames) throws MetaException { return false; } @@ -128,68 +112,12 @@ public void setUp() throws Exception { dropAllStoreObjects(objectStore); } - @After - public void tearDown() { - } - - /** - * Test notification operations - */ - // TODO MS-SPLIT uncomment once we move EventMessage over - /* - @Test - public void testNotificationOps() throws InterruptedException { - final int NO_EVENT_ID = 0; - final int FIRST_EVENT_ID = 1; - final int SECOND_EVENT_ID = 2; - - NotificationEvent event = - new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), ""); - NotificationEventResponse eventResponse; - CurrentNotificationEventId eventId; - - // Verify that there is no notifications available yet - eventId = objectStore.getCurrentNotificationEventId(); - Assert.assertEquals(NO_EVENT_ID, eventId.getEventId()); - - // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID - objectStore.addNotificationEvent(event); - Assert.assertEquals(FIRST_EVENT_ID, event.getEventId()); - objectStore.addNotificationEvent(event); - Assert.assertEquals(SECOND_EVENT_ID, event.getEventId()); - - // Verify that objectStore fetches the latest notification event ID - eventId = objectStore.getCurrentNotificationEventId(); - Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId()); - - // Verify that getNextNotification() returns all events - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - Assert.assertEquals(2, eventResponse.getEventsSize()); - Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); - - // Verify that getNextNotification(last) returns events after a specified event - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); - Assert.assertEquals(1, eventResponse.getEventsSize()); - Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - - // Verify that getNextNotification(last) returns zero events if there are no more notifications available - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); - Assert.assertEquals(0, eventResponse.getEventsSize()); - - // Verify that cleanNotificationEvents() cleans up all old notifications - Thread.sleep(1); - objectStore.cleanNotificationEvents(1); - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - Assert.assertEquals(0, eventResponse.getEventsSize()); - } - */ - /** * Test database operations */ @Test - public void 
testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException { + public void testDatabaseOps() throws MetaException, InvalidObjectException, + NoSuchObjectException { Database db1 = new Database(DB1, "description", "locationurl", null); Database db2 = new Database(DB2, "description", "locationurl", null); objectStore.createDatabase(db1); @@ -213,25 +141,30 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSu * Test table operations */ @Test - public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, + InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), + StorageDescriptor sd1 = + new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap params = new HashMap<>(); params.put("EXTERNAL", "false"); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); + Table tbl1 = + new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); List tables = objectStore.getAllTables(DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); - StorageDescriptor sd2 = new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)), + StorageDescriptor sd2 = + new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)), "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); - Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE"); + Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, + "MANAGED_TABLE"); objectStore.alterTable(DB1, TABLE1, newTbl1); tables = objectStore.getTables(DB1, "new*"); Assert.assertEquals(1, tables.size()); @@ -245,11 +178,11 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals(0, foreignKeys.size()); SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, - "pk_const_1", false, false, false); + "pk_const_1", false, false, false); objectStore.addPrimaryKeys(ImmutableList.of(pk)); SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col", - DB1, "new" + TABLE1, "fk_col", 1, - 0, 0, "fk_const_1", "pk_const_1", false, false, false); + DB1, "new" + TABLE1, "fk_col", 1, + 0, 0, "fk_const_1", "pk_const_1", false, false, false); objectStore.addForeignKeys(ImmutableList.of(fk)); // Retrieve from PK side @@ -259,7 +192,8 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO List fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); if (fks != null) { for (SQLForeignKey fkcol : fks) { - objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name()); + objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), + fkcol.getFk_name()); } } // Retrieve from FK side @@ -284,15 +218,19 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO * Tests partition operations */ 
@Test - public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + public void testPartitionOps() throws MetaException, InvalidObjectException, + NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, + new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap tableParams = new HashMap<>(); tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, ""); FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, ""); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); + Table tbl1 = + new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), + tableParams, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); HashMap partitionParams = new HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -309,10 +247,10 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); Assert.assertEquals(2, numPartitions); objectStore.dropPartition(DB1, TABLE1, value1); @@ -375,7 +313,7 @@ public void testDirectSqlErrorMetrics() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true); Metrics.initialize(conf); - conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter" ); @@ -479,93 +417,11 @@ public void testQueryCloseOnError() throws Exception { .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.anyObject()); } - @Ignore( - "This test is here to allow testing with other databases like mysql / postgres etc\n" - + " with user changes to the code. 
This cannot be run on apache derby because of\n" - + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html" - ) - @Test - public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException { - - final int NUM_THREADS = 10; - CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS, - () -> LoggerFactory.getLogger("test") - .debug(NUM_THREADS + " threads going to add notification")); - - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); - /* - Below are the properties that need to be set based on what database this test is going to be run - */ - -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); -// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, -// "jdbc:mysql://localhost:3306/metastore_db"); -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); -// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); - - /* - we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL - and we don't run the schema creation sql that includes the an insert for notification_sequence - which can be locked. the entry in notification_sequence happens via notification_event insertion. - */ - objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute(); - objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute(); - - objectStore.addNotificationEvent( - new NotificationEvent(0, 0, - EventMessage.EventType.CREATE_DATABASE.toString(), - "CREATE DATABASE DB initial")); - - ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS); - for (int i = 0; i < NUM_THREADS; i++) { - final int n = i; - - executorService.execute( - () -> { - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - String eventType = EventMessage.EventType.CREATE_DATABASE.toString(); - NotificationEvent dbEvent = - new NotificationEvent(0, 0, eventType, - "CREATE DATABASE DB" + n); - System.out.println("ADDING NOTIFICATION"); - - try { - cyclicBarrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new RuntimeException(e); - } - store.addNotificationEvent(dbEvent); - System.out.println("FINISH NOTIFICATION"); - }); - } - executorService.shutdown(); - assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); - - // we have to setup this again as the underlying PMF keeps getting reinitialized with original - // reference closed - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - NotificationEventResponse eventResponse = store.getNextNotification( - new NotificationEventRequest()); - assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize()); - long previousId = 0; - for (NotificationEvent event : eventResponse.getEvents()) { - assertTrue("previous:" + previousId + " current:" + event.getEventId(), - previousId < event.getEventId()); - assertTrue(previousId + 1 == event.getEventId()); - previousId = event.getEventId(); - } - } - @Test public void testRetryingExecutorSleep() throws Exception { - RetryingExecutor re = new ObjectStore.RetryingExecutor(new HiveConf(), null); - assertTrue("invalid sleep value", re.getSleepInterval() >= 0); + RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null); + Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0); } } + From 10dab98a5f43167696ecfab9990bf3eafd6f2706 
Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Mon, 9 Oct 2017 15:06:28 -0700 Subject: [PATCH 10/13] Fixed TestMetastoreExpr failing test. --- .../org/apache/hadoop/hive/metastore/PartFilterExprUtil.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java index 3a7ee93cf838..2671c1fc572b 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -53,7 +53,7 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express // can't be // moved until after HiveMetaStore is moved, which can't be moved until this is moved. Class exClass = JavaUtils.getClass( - "org.apache.hadoop.hive.metastore.IMetaStoreClient.IncompatibleMetastoreException", + "org.apache.hadoop.hive.metastore.IMetaStoreClient$IncompatibleMetastoreException", MetaException.class); throw JavaUtils.newInstance(exClass, new Class[]{String.class}, new Object[]{ex.getMessage()}); } From 5bdddbd9700d2c4e594fdeec891b20e7c7c4c961 Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Tue, 10 Oct 2017 10:44:01 -0700 Subject: [PATCH 11/13] Fixes after checkin of HIVE-17629. --- .../hive/metastore/cache/CachedStore.java | 216 +++++++++--------- .../hive/metastore/cache/SharedCache.java | 16 +- 2 files changed, 115 insertions(+), 117 deletions(-) diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 30aa9bd18e77..92fcb56832fe 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -118,8 +118,8 @@ public class CachedStore implements RawStore, Configurable { private static ReentrantReadWriteLock partitionColStatsCacheLock = new ReentrantReadWriteLock( true); private static AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false); - RawStore rawStore = null; - Configuration conf; + private RawStore rawStore = null; + private Configuration conf; private PartitionExpressionProxy expressionProxy = null; // Default value set to 100 milliseconds for test purpose private static long cacheRefreshPeriod = 100; @@ -198,12 +198,12 @@ public int getRefCount() { public CachedStore() { } - public static void initSharedCacheAsync(HiveConf conf) { + public static void initSharedCacheAsync(Configuration conf) { String clazzName = null; boolean isEnabled = false; try { clazzName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.RAW_STORE_IMPL); - isEnabled = MetaStoreUtils.getClass(clazzName).isAssignableFrom(CachedStore.class); + isEnabled = JavaUtils.getClass(clazzName, RawStore.class).isAssignableFrom(CachedStore.class); } catch (MetaException e) { LOG.error("Cannot instantiate metastore class", e); } @@ -255,7 +255,7 @@ static void prewarm(RawStore rawStore) throws Exception { Deadline.stopTimer(); for (Partition partition : partitions) { sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partition); + StringUtils.normalizeIdentifier(tblName), partition); } // Cache partition column stats 
Deadline.startTimer("getColStatsForTablePartitions"); @@ -330,11 +330,10 @@ static class CacheUpdateMasterWork implements Runnable { private final RawStore rawStore; public CacheUpdateMasterWork(Configuration conf) { - String rawStoreClassName = HiveConf.getVar(conf, - ConfVars.METASTORE_CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); + String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, + ObjectStore.class.getName()); try { - rawStore = ((Class) MetaStoreUtils.getClass( - rawStoreClassName)).newInstance(); + rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance(); rawStore.setConf(conf); } catch (InstantiationException | IllegalAccessException | MetaException e) { // MetaException here really means ClassNotFound (see the utility method). @@ -644,7 +643,7 @@ public List getDatabases(String pattern) throws MetaException { if (sharedCache == null) { return rawStore.getDatabases(pattern); } - List results = new ArrayList(); + List results = new ArrayList<>(); for (String dbName : sharedCache.listCachedDatabases()) { dbName = StringUtils.normalizeIdentifier(dbName); if (CacheUtils.matches(dbName, pattern)) { @@ -865,8 +864,8 @@ public boolean dropPartition(String dbName, String tableName, List part_ // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.removePartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } finally { partitionCacheLock.readLock().unlock(); } @@ -875,8 +874,8 @@ public boolean dropPartition(String dbName, String tableName, List part_ // Wait if background cache update is happening partitionColStatsCacheLock.readLock().lock(); isPartitionColStatsCacheDirty.set(true); - sharedCache.removePartitionColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.removePartitionColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } finally { partitionColStatsCacheLock.readLock().unlock(); } @@ -891,8 +890,8 @@ public List getPartitions(String dbName, String tableName, int max) if (sharedCache == null) { return rawStore.getPartitions(dbName, tableName, max); } - List parts = sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), max); + List parts = sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), max); if (parts != null) { for (Partition part : parts) { part.unsetPrivileges(); @@ -913,8 +912,8 @@ public void alterTable(String dbName, String tblName, Table newTable) // Wait if background cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.alterTableInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), newTable); + sharedCache.alterTableInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), newTable); } finally { tableCacheLock.readLock().unlock(); } @@ -924,8 +923,8 @@ public void alterTable(String dbName, String tblName, Table newTable) // Wait if background cache update is happening partitionCacheLock.readLock().lock(); 
isPartitionCacheDirty.set(true); - sharedCache.alterTableInPartitionCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), newTable); + sharedCache.alterTableInPartitionCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), newTable); } finally { partitionCacheLock.readLock().unlock(); } @@ -938,8 +937,8 @@ public List getTables(String dbName, String pattern) if (sharedCache == null) { return rawStore.getTables(dbName, pattern); } - List tableNames = new ArrayList(); - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + List tableNames = new ArrayList<>(); + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), pattern)) { tableNames.add(table.getTableName()); } @@ -954,8 +953,8 @@ public List getTables(String dbName, String pattern, if (sharedCache == null) { return rawStore.getTables(dbName, pattern); } - List tableNames = new ArrayList(); - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + List tableNames = new ArrayList<>(); + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), pattern) && table.getTableType().equals(tableType.toString())) { tableNames.add(table.getTableName()); @@ -971,8 +970,8 @@ public List getTableMeta(String dbNames, String tableNames, if (sharedCache == null) { return rawStore.getTableMeta(dbNames, tableNames, tableTypes); } - return sharedCache.getTableMeta(HiveStringUtils.normalizeIdentifier(dbNames), - HiveStringUtils.normalizeIdentifier(tableNames), tableTypes); + return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(dbNames), + StringUtils.normalizeIdentifier(tableNames), tableTypes); } @Override @@ -982,10 +981,10 @@ public List

<Table> getTableObjectsByName(String dbName, if (sharedCache == null) { return rawStore.getTableObjectsByName(dbName, tblNames); } - List<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); for (String tblName : tblNames) { - tables.add(sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName))); + tables.add(sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName))); } return tables; } @@ -1000,9 +999,9 @@ public List getAllTables(String dbName) throws MetaException { } private static List getAllTablesInternal(String dbName, SharedCache sharedCache) { - List tblNames = new ArrayList(); - for (Table tbl : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { - tblNames.add(HiveStringUtils.normalizeIdentifier(tbl.getTableName())); + List tblNames = new ArrayList<>(); + for (Table tbl : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { + tblNames.add(StringUtils.normalizeIdentifier(tbl.getTableName())); } return tblNames; } @@ -1014,9 +1013,9 @@ public List listTableNamesByFilter(String dbName, String filter, if (sharedCache == null) { return rawStore.listTableNamesByFilter(dbName, filter, max_tables); } - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); int count = 0; - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), filter) && (max_tables == -1 || count < max_tables)) { tableNames.add(table.getTableName()); @@ -1033,12 +1032,12 @@ public List listPartitionNames(String dbName, String tblName, if (sharedCache == null) { return rawStore.listPartitionNames(dbName, tblName, max_parts); } - List partitionNames = new ArrayList(); - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partitionNames = new ArrayList<>(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), max_parts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), max_parts)) { if (max_parts == -1 || count < max_parts) { partitionNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues())); } @@ -1071,8 +1070,8 @@ public void alterPartition(String dbName, String tblName, List partVals, // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } finally { partitionCacheLock.readLock().unlock(); } @@ -1081,8 +1080,8 @@ public void alterPartition(String dbName, String tblName, List partVals, // Wait if background cache update is happening partitionColStatsCacheLock.readLock().lock(); isPartitionColStatsCacheDirty.set(true); - sharedCache.alterPartitionInColStatsCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + 
sharedCache.alterPartitionInColStatsCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } finally { partitionColStatsCacheLock.readLock().unlock(); } @@ -1102,8 +1101,8 @@ public void alterPartitions(String dbName, String tblName, List> pa for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); - sharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } } finally { partitionCacheLock.readLock().unlock(); @@ -1116,8 +1115,8 @@ public void alterPartitions(String dbName, String tblName, List> pa for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); - sharedCache.alterPartitionInColStatsCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInColStatsCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } } finally { partitionColStatsCacheLock.readLock().unlock(); @@ -1164,8 +1163,8 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result, SharedCache sharedCache) throws MetaException, NoSuchObjectException { List parts = sharedCache.listCachedPartitions( - HiveStringUtils.normalizeIdentifier(table.getDbName()), - HiveStringUtils.normalizeIdentifier(table.getTableName()), maxParts); + StringUtils.normalizeIdentifier(table.getDbName()), + StringUtils.normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } @@ -1193,14 +1192,14 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, return rawStore.getPartitionsByExpr( dbName, tblName, expr, defaultPartitionName, maxParts, result); } - List partNames = new LinkedList(); - Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partNames = new LinkedList<>(); + Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn( table, expr, defaultPartitionName, maxParts, partNames, sharedCache); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); part.unsetPrivileges(); result.add(part); } @@ -1215,8 +1214,8 @@ public int getNumPartitionsByFilter(String dbName, String tblName, // if (sharedCache == null) { return rawStore.getNumPartitionsByFilter(dbName, tblName, filter); // } - // Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - // HiveStringUtils.normalizeIdentifier(tblName)); + // Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + // 
StringUtils.normalizeIdentifier(tblName)); // return 0; } @@ -1227,10 +1226,10 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) if (sharedCache == null) { return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); } - String defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); - List partNames = new LinkedList(); - Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); + List partNames = new LinkedList<>(); + Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); getPartitionNamesPrunedByExprNoTxn( table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); return partNames.size(); @@ -1253,10 +1252,10 @@ public List getPartitionsByNames(String dbName, String tblName, if (sharedCache == null) { return rawStore.getPartitionsByNames(dbName, tblName, partNames); } - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); if (part!=null) { partitions.add(part); } @@ -1428,11 +1427,11 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (sharedCache == null) { return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); } - Partition p = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals); + Partition p = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals); if (p!=null) { - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); String partName = Warehouse.makePartName(t.getPartitionKeys(), partVals); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, userName, groupNames); @@ -1449,12 +1448,12 @@ public List getPartitionsWithAuth(String dbName, String tblName, if (sharedCache == null) { return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); } - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); - List partitions = new ArrayList(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); + List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(t.getPartitionKeys(), 
part.getValues()); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, @@ -1475,12 +1474,12 @@ public List listPartitionNamesPs(String dbName, String tblName, if (sharedCache == null) { return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); } - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); int count = 0; - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { boolean psMatch = true; for (int i=0;i listPartitionsPsWithAuth(String dbName, return rawStore.listPartitionsPsWithAuth( dbName, tblName, partVals, maxParts, userName, groupNames); } - List partitions = new ArrayList(); - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partitions = new ArrayList<>(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { boolean psMatch = true; for (int i=0;i colStatObjs = new ArrayList<>(); for (String colName : colNames) { String colStatsCacheKey = - CacheUtils.buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), colName); + CacheUtils.buildKey(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), colName); ColumnStatisticsObj colStat = sharedCache.getCachedTableColStats(colStatsCacheKey); if (colStat != null) { colStatObjs.add(colStat); @@ -1618,8 +1617,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri // Wait if background cache update is happening tableColStatsCacheLock.readLock().lock(); isTableColStatsCacheDirty.set(true); - sharedCache.removeTableColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), colName); + sharedCache.removeTableColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), colName); } finally { tableColStatsCacheLock.readLock().unlock(); } @@ -1649,8 +1648,8 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List return rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); } List colStats = mergeColStatsForPartitions( - HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tblName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), partNames, colNames, sharedCache); return new AggrStats(colStats, partNames.size()); } @@ -1717,10 +1716,9 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List private List mergeColStatsForPartitions(String dbName, String 
       tblName, List<String> partNames, List<String> colNames, SharedCache sharedCache) throws MetaException {
-    final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION);
-    final double ndvTuner = HiveConf.getFloatVar(getConf(),
-        HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER);
+    final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(),
+        ConfVars.STATS_NDV_DENSITY_FUNCTION);
+    final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
     Map> map = new HashMap<>();
     for (String colName : colNames) {
@@ -1826,8 +1824,8 @@ public void dropPartitions(String dbName, String tblName, List<String> partNames
       isPartitionCacheDirty.set(true);
       for (String partName : partNames) {
         List<String> vals = partNameToVals(partName);
-        sharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-            HiveStringUtils.normalizeIdentifier(tblName), vals);
+        sharedCache.removePartitionFromCache(StringUtils.normalizeIdentifier(dbName),
+            StringUtils.normalizeIdentifier(tblName), vals);
       }
     } finally {
       partitionCacheLock.readLock().unlock();
@@ -1839,8 +1837,8 @@ public void dropPartitions(String dbName, String tblName, List<String> partNames
       isPartitionColStatsCacheDirty.set(true);
       for (String partName : partNames) {
         List<String> part_vals = partNameToVals(partName);
-        sharedCache.removePartitionColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName),
-            HiveStringUtils.normalizeIdentifier(tblName), part_vals);
+        sharedCache.removePartitionColStatsFromCache(StringUtils.normalizeIdentifier(dbName),
+            StringUtils.normalizeIdentifier(tblName), part_vals);
       }
     } finally {
       partitionColStatsCacheLock.readLock().unlock();
@@ -2078,8 +2076,8 @@ public List<String> createTableWithConstraints(Table tbl,
         uniqueConstraints, notNullConstraints);
     SharedCache sharedCache = sharedCacheWrapper.get();
     if (sharedCache == null) return constraintNames;
-    sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()),
-        HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
+    sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()),
+        StringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
     return constraintNames;
   }
 
@@ -2142,7 +2140,7 @@ public String getMetastoreDbUuid() throws MetaException {
 // CachedStore that's specific to SharedCache (e.g. update threads) should be refactored to
 // be part of this, then this could be moved out of this file (or merged with SharedCache).
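Every CachedStore hunk above repeats the same two moves: fall back to the underlying RawStore while the shared cache is not yet initialized, and run database and table names through StringUtils.normalizeIdentifier before using them as cache keys, because the metastore compares identifiers case-insensitively. Before the diff continues, here is a minimal, self-contained sketch of just that pattern; CachedLookupSketch, RawClient and their methods are illustrative stand-ins, not the real CachedStore or SharedCache API.

import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CachedLookupSketch {
  /** Stand-in for the JDO-backed RawStore. */
  interface RawClient {
    String fetch(String dbName, String tblName);
  }

  private final Map<String, String> cache = new ConcurrentHashMap<>();
  private volatile boolean cacheReady = false;   // plays the role of the sharedCache == null checks
  private final RawClient rawClient;

  CachedLookupSketch(RawClient rawClient) {
    this.rawClient = rawClient;
  }

  // Same idea as StringUtils.normalizeIdentifier: identifiers compare case-insensitively,
  // so they are normalized once before being used as cache keys.
  private static String normalize(String ident) {
    return ident.trim().toLowerCase(Locale.ROOT);
  }

  String get(String dbName, String tblName) {
    if (!cacheReady) {
      // Cache still warming up: answer from the raw store, like the early returns in the hunks above.
      return rawClient.fetch(dbName, tblName);
    }
    String key = normalize(dbName) + "." + normalize(tblName);
    return cache.computeIfAbsent(key, k -> rawClient.fetch(dbName, tblName));
  }
}

A volatile flag keeps the fallback decision lock-free, which is roughly what the null check on the shared cache buys the real code. The diff resumes below.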
   private static final class SharedCacheWrapper {
-    private static enum InitState {
+    private enum InitState {
       NOT_ENABLED, INITIALIZING, INITIALIZED, FAILED_FATAL
     }
@@ -2177,7 +2175,7 @@ void updateInitState(Throwable error, boolean isFatal) {
       }
     }
 
-    void startInit(HiveConf conf) {
+    void startInit(Configuration conf) {
       LOG.info("Initializing shared cache");
       synchronized (initLock) {
         assert initState == InitState.NOT_ENABLED;
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 043ddddeea84..a76b8480b0c6 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -48,8 +48,8 @@
 import com.google.common.annotations.VisibleForTesting;
 
 public class SharedCache {
-  private Map<String, Database> databaseCache = new TreeMap<String, Database>();
-  private Map<String, TableWrapper> tableCache = new TreeMap<String, TableWrapper>();
+  private Map<String, Database> databaseCache = new TreeMap<>();
+  private Map<String, TableWrapper> tableCache = new TreeMap<>();
   private Map<String, PartitionWrapper> partitionCache = new TreeMap<>();
   private Map<String, ColumnStatisticsObj> partitionColStatsCache = new TreeMap<>();
   private Map<String, ColumnStatisticsObj> tableColStatsCache = new TreeMap<>();
@@ -81,12 +81,12 @@ public synchronized void removeDatabaseFromCache(String dbName) {
   }
 
   public synchronized List<String> listCachedDatabases() {
-    return new ArrayList<String>(databaseCache.keySet());
+    return new ArrayList<>(databaseCache.keySet());
   }
 
   public synchronized void alterDatabaseInCache(String dbName, Database newDb) {
-    removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName));
-    addDatabaseToCache(HiveStringUtils.normalizeIdentifier(newDb.getName()), newDb.deepCopy());
+    removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbName));
+    addDatabaseToCache(StringUtils.normalizeIdentifier(newDb.getName()), newDb.deepCopy());
   }
 
   public synchronized int getCachedDatabaseCount() {
@@ -248,7 +248,7 @@ public synchronized int getCachedTableCount() {
   }
 
   public synchronized List<Table> listCachedTables(String dbName) {
-    List<Table> tables = new ArrayList<Table>();
+    List<Table> tables = new ArrayList<>();
     for (TableWrapper wrapper : tableCache.values()) {
       if (wrapper.getTable().getDbName().equals(dbName)) {
         tables.add(CacheUtils.assemble(wrapper, this));
@@ -258,7 +258,7 @@ public synchronized List<Table> listCachedTables(String dbName) {
   }
 
   public synchronized List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes) {
-    List<TableMeta> tableMetas = new ArrayList<TableMeta>();
+    List<TableMeta> tableMetas = new ArrayList<>();
     for (String dbName : listCachedDatabases()) {
       if (CacheUtils.matches(dbName, dbNames)) {
         for (Table table : listCachedTables(dbName)) {
@@ -354,7 +354,7 @@ public synchronized void removePartitionColStatsFromCache(String dbName, String
   }
 
   public synchronized List<Partition> listCachedPartitions(String dbName, String tblName, int max) {
-    List<Partition> partitions = new ArrayList<Partition>();
+    List<Partition> partitions = new ArrayList<>();
     int count = 0;
     for (PartitionWrapper wrapper : partitionCache.values()) {
       if (wrapper.getPartition().getDbName().equals(dbName)

From 546b35d96d9f95255aca52684a6f4507a6ae3700 Mon Sep 17 00:00:00 2001
From: Alan Gates
Date: Wed, 11 Oct 2017 07:45:48 -0700
Subject: [PATCH 12/13] Changes to make sure datanucleus and javax.jdo
 properties are always picked up, based on Zoltan's review comments.

---
 .../hadoop/hive/metastore/ObjectStore.java    | 22 +++++++++++++++++++
 .../hive/metastore/TestObjectStore.java       | 18 +++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index c04f5bffbc78..5665360b8aba 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -502,6 +502,8 @@ private static Properties getDataSourceProps(Configuration conf) {
     Properties prop = new Properties();
     correctAutoStartMechanism(conf);
 
+    // First, go through and set all our values for datanucleus and javax.jdo parameters.  This
+    // has to be a separate first step because we don't set the default values in the config object.
     for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) {
       String confVal = MetastoreConf.getAsString(conf, var);
       Object prevVal = prop.setProperty(var.varname, confVal);
@@ -510,6 +512,21 @@ private static Properties getDataSourceProps(Configuration conf) {
             + " from jpox.properties with " + confVal);
       }
     }
+
+    // Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
+    for (Map.Entry<String, String> e : conf) {
+      if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
+        // We have to handle this differently depending on whether it is a value known to
+        // MetastoreConf or not. If it is, we need to get the default value if a value isn't
+        // provided. If not, we just set whatever the user has set.
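Before the hunk continues with the actual setProperty call, here is a rough, self-contained illustration of what this second pass does: any key the user set whose name starts with "datanucleus." or "javax.jdo." is copied straight into the JDO Properties, and everything else is ignored. The Map parameter stands in for Hadoop's Configuration, which getDataSourceProps iterates directly; the class and method names here are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

class DataNucleusPassThroughSketch {
  static Properties collect(Map<String, String> userSettings) {
    Properties prop = new Properties();
    for (Map.Entry<String, String> e : userSettings.entrySet()) {
      // Keys MetastoreConf does not know about are passed through untouched.
      if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
        prop.setProperty(e.getKey(), e.getValue());
      }
    }
    return prop;
  }

  public static void main(String[] args) {
    Map<String, String> conf = new LinkedHashMap<>();
    conf.put("datanucleus.no.such.key", "test_value");   // kept, mirrors the new unit test below
    conf.put("blabla.no.such.key", "another_value");     // dropped
    System.out.println(collect(conf));                   // prints {datanucleus.no.such.key=test_value}
  }
}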
+        Object prevVal = prop.setProperty(e.getKey(), e.getValue());
+        if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) {
+          LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+              + " from jpox.properties with " + e.getValue());
+        }
+      }
+    }
+
     // Password may no longer be in the conf, use getPassword()
     try {
       String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
@@ -9255,4 +9272,9 @@ void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) {
   public static void setTwoMetastoreTesting(boolean twoMetastoreTesting) {
     forTwoMetastoreTesting = twoMetastoreTesting;
   }
+
+  @VisibleForTesting
+  Properties getProp() {
+    return prop;
+  }
 }
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 85ea3f280293..190d75e3090d 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -423,5 +424,22 @@ public void testRetryingExecutorSleep() throws Exception {
     Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
   }
 
+  @Test
+  public void testNonConfDatanucleusValueSet() {
+    String key = "datanucleus.no.such.key";
+    String value = "test_value";
+    String key1 = "blabla.no.such.key";
+    String value1 = "another_value";
+    Assume.assumeTrue(System.getProperty(key) == null);
+    Configuration localConf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(localConf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+        MockPartitionExpressionProxy.class.getName());
+    localConf.set(key, value);
+    localConf.set(key1, value1);
+    objectStore = new ObjectStore();
+    objectStore.setConf(localConf);
+    Assert.assertEquals(value, objectStore.getProp().getProperty(key));
+    Assert.assertNull(objectStore.getProp().getProperty(key1));
+  }
 }

From 69a7da9e9363d34c9b3aafca0d24bd6817a6c3f4 Mon Sep 17 00:00:00 2001
From: Alan Gates
Date: Thu, 12 Oct 2017 10:19:26 -0700
Subject: [PATCH 13/13] Commented out changes in ObjectStore based on
 HIVE-17788. Also think I fixed the occasionally failing ACID tests with a
 change to AcidOpenTxnsCounterService.

---
 .../java/org/apache/hadoop/hive/metastore/ObjectStore.java    | 4 ++++
 .../hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java | 3 ++-
 .../org/apache/hadoop/hive/metastore/TestObjectStore.java     | 2 ++
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 5665360b8aba..c533d7908263 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -514,6 +514,9 @@ private static Properties getDataSourceProps(Configuration conf) {
     }
 
     // Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
+    // TODO Commenting this out for now, as it breaks because the conf values aren't getting properly
+    // interpolated in case of variables. See HIVE-17788.
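The TODO just added blames the breakage on conf values not being interpolated. Assuming that refers to the way Hadoop's Configuration iterator exposes the raw stored strings while Configuration.get() expands ${var} references (an inference here, not something the patch states; see HIVE-17788 for the authoritative analysis), the difference looks roughly like this, with a hypothetical key used purely for illustration:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class ConfInterpolationDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("base.dir", "/tmp/metastore");
    conf.set("datanucleus.some.path", "${base.dir}/dn");   // hypothetical key, illustration only

    // get() substitutes variables before returning the value.
    System.out.println(conf.get("datanucleus.some.path"));  // /tmp/metastore/dn

    // Iterating the Configuration hands back the raw, un-substituted string, which is what a
    // pass-through loop like the one the TODO refers to would copy into the JDO Properties.
    for (Map.Entry<String, String> e : conf) {
      if ("datanucleus.some.path".equals(e.getKey())) {
        System.out.println(e.getValue());                   // ${base.dir}/dn
      }
    }
  }
}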
+    /*
     for (Map.Entry<String, String> e : conf) {
       if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
         // We have to handle this differently depending on whether it is a value known to
@@ -526,6 +529,7 @@ private static Properties getDataSourceProps(Configuration conf) {
         }
       }
     }
+    */
 
     // Password may no longer be in the conf, use getPassword()
     try {
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
index 1223b522b259..e3f7eca553c9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
@@ -31,13 +31,13 @@ public class AcidOpenTxnsCounterService implements RunnableConfigurable {
   private Configuration conf;
   private int isAliveCounter = 0;
   private long lastLogTime = 0;
+  private TxnStore txnHandler;
 
   @Override
   public void run() {
     try {
       long startTime = System.currentTimeMillis();
       isAliveCounter++;
-      TxnStore txnHandler = TxnUtils.getTxnStore(conf);
       txnHandler.countOpenTxns();
       if (System.currentTimeMillis() - lastLogTime > 60 * 1000) {
         LOG.info("AcidOpenTxnsCounterService ran for " +
@@ -54,6 +54,7 @@ public void run() {
   @Override
   public void setConf(Configuration configuration) {
     conf = configuration;
+    txnHandler = TxnUtils.getTxnStore(conf);
   }
 
   @Override
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 190d75e3090d..235d4299a780 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -46,6 +46,7 @@
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -424,6 +425,7 @@ public void testRetryingExecutorSleep() throws Exception {
     Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
   }
 
+  @Ignore // See comment in ObjectStore.getDataSourceProps
   @Test
   public void testNonConfDatanucleusValueSet() {
     String key = "datanucleus.no.such.key";