From 4c4619d61f98efd3a47c1ca6dd9f2048022565ab Mon Sep 17 00:00:00 2001
From: Alan Gates
Date: Mon, 31 Jul 2017 14:38:09 -0700
Subject: [PATCH 1/3] Removed shims from metastore. For HDFS and getPassword I
 just access those operations directly. I copied all of the
 HadoopThriftAuthBridge stuff over from Hive common.

---
 .../hive/hcatalog/cli/TestPermsGrp.java       |   4 +-
 .../mapreduce/TestHCatPartitionPublish.java   |   6 +-
 .../security}/TestHadoopAuthBridge23.java     |   5 +-
 ...bstractTestAuthorizationApiAuthorizer.java |   4 +-
 .../hive/metastore/TestFilterHooks.java       |   4 +-
 ...stHiveMetaStoreWithEnvironmentContext.java |   4 +-
 .../metastore/TestMetaStoreAuthorization.java |   4 +-
 .../TestMetaStoreEndFunctionListener.java     |   5 +-
 .../metastore/TestMetaStoreEventListener.java |   4 +-
 ...estMetaStoreEventListenerOnlyOnCommit.java |   4 +-
 .../metastore/TestMetaStoreInitListener.java  |   5 +-
 .../TestMetaStoreListenersError.java          |   6 +-
 .../hive/metastore/TestMetaStoreMetrics.java  |   4 +-
 .../metastore/TestRemoteHiveMetaStore.java    |   4 +-
 .../TestRemoteHiveMetaStoreIpAddress.java     |   4 +-
 .../metastore/TestRetryingHMSHandler.java     |   4 +-
 ...tDDLWithRemoteMetastoreSecondNamenode.java |   4 +-
 .../StorageBasedMetastoreTestBase.java        |   5 +-
 .../TestAuthorizationPreEventListener.java    |   4 +-
 .../TestClientSideAuthorizationProvider.java  |   4 +-
 .../TestMetastoreAuthorizationProvider.java   |   4 +-
 ...estMultiAuthorizationPreEventListener.java |   4 +-
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java |   4 +-
 .../hadoop/hive/metastore/HiveMetaStore.java  |  27 +-
 .../hive/metastore/HiveMetaStoreClient.java   |  10 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java |   8 +-
 .../hadoop/hive/metastore/ObjectStore.java    |   5 +-
 .../hive/metastore/TUGIBasedProcessor.java    |   2 +-
 .../hadoop/hive/metastore/Warehouse.java      |   4 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java  |   5 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java |   5 +-
 standalone-metastore/pom.xml                  |  16 +
 .../hive/metastore/conf/MetastoreConf.java    |  10 +
 .../security/DelegationTokenIdentifier.java   |  52 ++
 .../DelegationTokenSecretManager.java         | 125 ++++
 .../security/DelegationTokenStore.java        | 118 ++++
 .../security/HadoopThriftAuthBridge.java      | 668 ++++++++++++++++++
 .../security/HadoopThriftAuthBridge23.java    | 114 +++
 .../metastore/security/MemoryTokenStore.java  | 136 ++++
 .../MetastoreDelegationTokenManager.java      | 166 +++++
 .../metastore/security/TFilterTransport.java  |  99 +++
 .../security/TUGIAssumingTransport.java       |  73 ++
 .../security/TUGIContainingTransport.java     |  96 +++
 ...okenStoreDelegationTokenSecretManager.java | 333 +++++++++
 .../hive/metastore/utils/HdfsUtils.java       | 125 ++++
 .../hive/metastore/utils/SecurityUtils.java   |  40 ++
 .../MetastoreDelegationTokenSupport.java      |  68 ++
 .../hive/metastore/utils/TestHdfsUtils.java   | 193 +++
 48 files changed, 2510 insertions(+), 88 deletions(-)
 rename itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/{thrift => metastore/security}/TestHadoopAuthBridge23.java (98%)
 create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java
 create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java
 create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java
 create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java
 create mode 100644
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java create mode 100644 standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java create mode 100644 standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 66a5dd43d544..e863372e3c7f 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -44,12 +44,12 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hive.hcatalog.ExitException; import org.apache.hive.hcatalog.NoExitSecurityManager; import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; @@ -81,7 +81,7 @@ protected void setUp() throws Exception { msPort = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(msPort, HadoopThriftAuthBridge.getBridge()); isServerRunning = true; diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 69874bcae88a..358dd5084077 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -41,11 +41,11 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import 
org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; @@ -54,7 +54,6 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; -import org.apache.hadoop.util.Shell; import org.apache.hive.hcatalog.NoExitSecurityManager; import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; import org.apache.hive.hcatalog.data.DefaultHCatRecord; @@ -106,8 +105,7 @@ public static void setup() throws Exception { msPort = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(msPort, ShimLoader - .getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(msPort, HadoopThriftAuthBridge.getBridge()); Thread.sleep(10000); isServerRunning = true; securityManager = System.getSecurityManager(); diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java similarity index 98% rename from itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java rename to itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java index 8656fffd761f..ef360400a187 100644 --- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java +++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hive.thrift; +package org.apache.hadoop.hive.metastore.security; import org.apache.hadoop.conf.Configuration; @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.SaslRpcServer; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; @@ -71,7 +70,7 @@ public class TestHadoopAuthBridge23 { public static class MyTokenStore extends MemoryTokenStore { static volatile DelegationTokenStore TOKEN_STORE = null; - public void init(Object hmsHandler, ServerMode smode) throws TokenStoreException { + public void init(Object hmsHandler, HadoopThriftAuthBridge.Server.ServerMode smode) throws TokenStoreException { super.init(hmsHandler, smode); TOKEN_STORE = this; try { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java index 341c7ba533ad..5ea809f7d409 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java @@ -31,9 +31,9 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; -import org.apache.hadoop.hive.shims.ShimLoader; import org.junit.Test; /** @@ -58,7 +58,7 @@ protected static void setup() throws Exception { hiveConf = new HiveConf(); if (isRemoteMetastoreMode) { int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); } hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 1073abb4e1b8..fc85d86c41fd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -175,7 +175,7 @@ public static void setUp() throws Exception { UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName()); int port = MetaStoreUtils.findFreePort(); 
hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index d6e4fb7632e3..dee5d7799e4d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -42,12 +42,12 @@ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; import org.apache.hadoop.hive.metastore.events.DropTableEvent; import org.apache.hadoop.hive.metastore.events.ListenerEvent; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.shims.ShimLoader; /** * TestHiveMetaStoreWithEnvironmentContext. Test case for _with_environment_context @@ -74,7 +74,7 @@ protected void setUp() throws Exception { DummyListener.class.getName()); int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java index bfee53997a4a..6f0282cb85ca 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; public class TestMetaStoreAuthorization extends TestCase { @@ -75,7 +75,7 @@ public void testIsWritable() throws Exception { public void testMetaStoreAuthorization() throws Exception { setup(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); HiveMetaStoreClient client = new HiveMetaStoreClient(conf); FileSystem fs = null; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index 1e78ff1b6ff8..4f559d07f485 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -25,9 +25,10 @@ import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; + /** * TestMetaStoreEventListener. Test case for * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener} @@ -48,7 +49,7 @@ protected void setUp() throws Exception { System.setProperty("hive.metastore.end.function.listeners", DummyEndFunctionListener.class.getName()); int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index f3849914a755..82fb8c7379f8 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -63,10 +63,10 @@ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.SetProcessor; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; /** * TestMetaStoreEventListener. 
Test case for @@ -98,7 +98,7 @@ protected void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, metaConfVal); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index 0c3e703761b3..3302937b9fc0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -25,9 +25,9 @@ import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.events.ListenerEvent; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; /** * Ensure that the status of MetaStore events depend on the RawStore's commit status. @@ -51,7 +51,7 @@ protected void setUp() throws Exception { DummyRawStoreControlledCommit.class.getName()); int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java index e8171e56a668..09b5cd8445d5 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java @@ -22,10 +22,9 @@ import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; /** * TestMetaStoreInitListener. 
Test case for @@ -43,7 +42,7 @@ protected void setUp() throws Exception { System.setProperty("hive.metastore.init.hooks", DummyMetaStoreInitListener.class.getName()); int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java index d074028fb6c7..5f86c26ca253 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java @@ -22,7 +22,7 @@ import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; /** * Test for unwrapping InvocationTargetException, which is thrown from @@ -35,7 +35,7 @@ public void testInitListenerException() throws Throwable { System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName()); int port = MetaStoreUtils.findFreePort(); try { - HiveMetaStore.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); } catch (Throwable throwable) { Assert.assertEquals(MetaException.class, throwable.getClass()); Assert.assertEquals( @@ -52,7 +52,7 @@ public void testEventListenerException() throws Throwable { System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName()); int port = MetaStoreUtils.findFreePort(); try { - HiveMetaStore.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); } catch (Throwable throwable) { Assert.assertEquals(MetaException.class, throwable.getClass()); Assert.assertEquals( diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java index b462b2a89326..9b6cab3207c3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; import org.junit.BeforeClass; import org.junit.Test; @@ -58,7 +58,7 @@ public static void before() throws Exception { metrics = (CodahaleMetrics) MetricsFactory.getInstance(); //Increments one HMS connection - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), 
hiveConf); //Increments one HMS connection (Hive.get()) SessionState.start(new CliSessionState(hiveConf)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java index 878f9131f7d2..a8f9907948eb 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; public class TestRemoteHiveMetaStore extends TestHiveMetaStore { @@ -44,7 +44,7 @@ protected void setUp() throws Exception { port = MetaStoreUtils.findFreePort(); System.out.println("Starting MetaStore Server on port " + port); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf); isServerStarted = true; // This is default case with setugi off for both client and server diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java index 63eea2710a2a..31580fcd8b02 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.util.StringUtils; /** @@ -52,7 +52,7 @@ protected void setUp() throws Exception { System.out.println("Starting MetaStore Server on port " + port); System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname, IpAddressListener.class.getName()); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); isServerStarted = true; // This is default case with setugi off for both client and server diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index 99c09b782396..a7e5eb3c44d1 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -33,11 +33,11 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.shims.ShimLoader; /** * 
TestRetryingHMSHandler. Test case for @@ -54,7 +54,7 @@ protected void setUp() throws Exception { System.setProperty("hive.metastore.pre.event.listeners", AlternateFailurePreListener.class.getName()); int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index ce8fe6074bdc..36fa81e2c6ba 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -31,11 +31,11 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; import org.apache.hadoop.hive.ql.metadata.*; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; /** * Tests DDL with remote metastore service and second namenode (HIVE-6374) @@ -81,7 +81,7 @@ protected void setUp() throws Exception { // Test with remote metastore service int port = MetaStoreUtils.findFreePort(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java index 36f31f4d9ceb..ccf835f6a2b0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java @@ -30,14 +30,13 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -75,7 +74,7 @@ public void setUp() throws Exception { 
InjectableDummyAuthenticator.class.getName()); clientHiveConf = createHiveConf(); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), clientHiveConf); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), clientHiveConf); // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java index b0da8845fc61..3b057317d600 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java @@ -32,11 +32,11 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; /** * TestAuthorizationPreEventListener. Test case for @@ -62,7 +62,7 @@ protected void setUp() throws Exception { System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, HadoopDefaultMetastoreAuthenticator.class.getName()); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); clientHiveConf = new HiveConf(this.getClass()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java index d895da89fcd3..24e49ba88a19 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java @@ -29,11 +29,11 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; @@ -64,7 +64,7 @@ protected void setUp() throws Exception { System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, ""); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); clientHiveConf = new HiveConf(this.getClass()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 19dc9cf85b84..8b90fa886532 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -45,7 +46,6 @@ import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; @@ -99,7 +99,7 @@ protected void setUp() throws Exception { System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); clientHiveConf = createHiveConf(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java index 5c9bf05373b5..3d9247d4bd67 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java @@ -27,11 +27,11 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; import org.junit.BeforeClass; import org.junit.Test; @@ -62,7 +62,7 @@ public static void setUp() throws Exception { System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, HadoopDefaultMetastoreAuthenticator.class.getName()); - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); clientHiveConf = new HiveConf(); diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 71f9640ad217..06ddc22c6eb1 100644 --- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.llap.LlapItUtils; import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster; import org.apache.hadoop.hive.metastore.MetaStoreUtils; 
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper; import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; @@ -311,8 +312,7 @@ public void start(Map confOverlay) throws Exception { if (isMetastoreRemote) { int metaStorePort = MetaStoreUtils.findFreePort(); getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort); - MetaStoreUtils.startMetaStore(metaStorePort, - ShimLoader.getHadoopThriftAuthBridge(), getHiveConf()); + MetaStoreUtils.startMetaStore(metaStorePort, HadoopThriftAuthBridge.getBridge(), getHiveConf()); } hiveServer2 = new HiveServer2(); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 370c1693d827..df01b2578c4f 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hive.io.HdfsUtils; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; @@ -127,17 +128,14 @@ import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager; +import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.shims.HadoopShims; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode; -import org.apache.hadoop.hive.thrift.HiveDelegationTokenManager; -import org.apache.hadoop.hive.thrift.TUGIContainingTransport; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -204,7 +202,7 @@ protected DateFormat initialValue() { public static final String PUBLIC = "public"; private static HadoopThriftAuthBridge.Server saslServer; - private static HiveDelegationTokenManager delegationTokenManager; + private static MetastoreDelegationTokenManager delegationTokenManager; private static boolean useSasl; public static final String NO_FILTER_STRING = ""; @@ -2342,9 +2340,8 @@ public void truncate_table(final String dbName, final String tableName, List sslVersionBlacklist = new ArrayList(); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index b37c677837d7..fb0f72a80fe3 100644 --- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -60,11 +60,11 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TApplicationException; @@ -422,8 +422,8 @@ private void open() throws MetaException { throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH.varname + " Not configured for SSL connection"); } - String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf, - HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname); + String trustStorePassword = + MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD); // Create an SSL socket and connect transport = HiveAuthUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, trustStorePath, trustStorePassword ); @@ -442,7 +442,7 @@ private void open() throws MetaException { // Wrap thrift connection with SASL for secure connection. try { HadoopThriftAuthBridge.Client authBridge = - ShimLoader.getHadoopThriftAuthBridge().createClient(); + HadoopThriftAuthBridge.getBridge().createClient(); // check if we should use delegation tokens to authenticate // the call below gets hold of the tokens if they are set up by hadoop diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 6a543060ef08..bbe13fd77b5a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -65,6 +65,8 @@ import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.shims.ShimLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -108,8 +110,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; import org.apache.hadoop.security.SaslRpcServer; import org.apache.hive.common.util.HiveStringUtils; import org.apache.hive.common.util.ReflectionUtil; @@ -1258,7 +1258,7 @@ public static void makeDir(Path path, HiveConf hiveConf) throws MetaException { } public static int startMetaStore() throws Exception { - return startMetaStore(ShimLoader.getHadoopThriftAuthBridge(), null); + return startMetaStore(HadoopThriftAuthBridge.getBridge(), null); } public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws 
Exception { @@ -1268,7 +1268,7 @@ public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf c } public static int startMetaStore(HiveConf conf) throws Exception { - return startMetaStore(ShimLoader.getHadoopThriftAuthBridge(), conf); + return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf); } public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 897fc4efd4c5..b87811502bea 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -121,6 +121,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MDBPrivilege; @@ -157,7 +158,6 @@ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.StringUtils; import org.apache.hive.common.util.HiveStringUtils; import org.apache.thrift.TException; @@ -493,8 +493,7 @@ private static Properties getDataSourceProps(Configuration conf) { } // Password may no longer be in the conf, use getPassword() try { - String passwd = - ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname); + String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); if (passwd != null && !passwd.isEmpty()) { prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java index 89f4701e0b75..64f0b96b8488 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java @@ -25,13 +25,13 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result; -import org.apache.hadoop.hive.thrift.TUGIContainingTransport; import org.apache.hadoop.security.UserGroupInformation; import org.apache.thrift.ProcessFunction; import org.apache.thrift.TApplicationException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index 778550b1c8aa..1dd50ded6cbe 100755 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -33,6 +33,7 @@ import java.util.regex.Pattern; import 
org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.utils.HdfsUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -52,7 +53,6 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.ReflectionUtils; /** @@ -252,7 +252,7 @@ public boolean isWritable(Path path) throws IOException { try { fs = getFs(path); stat = fs.getFileStatus(path); - ShimLoader.getHadoopShims().checkFileAccess(fs, stat, FsAction.WRITE); + HdfsUtils.checkFileAccess(fs, stat, FsAction.WRITE); return true; } catch (FileNotFoundException fnfe){ // File named by path doesn't exist; nothing to validate. diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index f72c379879f8..016189474069 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -28,10 +28,10 @@ import java.util.Properties; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.shims.ShimLoader; /** * Utility methods for creating and destroying txn database/schema, plus methods for @@ -328,8 +328,7 @@ static Connection getConnection() throws Exception { Properties prop = new Properties(); String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY); String user = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME); - String passwd = - ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname); + String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); prop.setProperty("user", user); prop.setProperty("password", passwd); Connection conn = driver.connect(driverUrl, prop); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index b722af6ceb83..f3968e45c4be 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.DatabaseProduct; import org.apache.hadoop.hive.metastore.HouseKeeperService; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.dbcp.PoolingDataSource; @@ -45,7 +46,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.StringUtils; import javax.sql.DataSource; @@ -3701,8 +3701,7 @@ private static String getMetastoreJdbcUser(HiveConf conf) { private static String getMetastoreJdbcPasswd(HiveConf conf) throws SQLException { try { - return ShimLoader.getHadoopShims().getPassword(conf, - HiveConf.ConfVars.METASTOREPWD.varname); + return MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); } catch (IOException err) { 
throw new SQLException("Error getting metastore password", err); } diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index b8826c69a74b..2e0c51e0a4d7 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -53,6 +53,22 @@ + + org.apache.hadoop + hadoop-hdfs + ${hadoop.version} + true + + + org.slf4j + slf4j-log4j12 + + + commmons-logging + commons-logging + + + org.apache.hive diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index d8ec1d9c5c83..6e2a4624c693 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -361,6 +361,16 @@ public enum ConfVars { "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" + "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" + "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."), + DELEGATION_KEY_UPDATE_INTERVAL("metastore.cluster.delegation.key.update-interval", + "hive.cluster.delegation.key.update-interval", 1, TimeUnit.DAYS, ""), + DELEGATION_TOKEN_GC_INTERVAL("metastore.cluster.delegation.token.gc-interval", + "hive.cluster.delegation.token.gc-interval", 1, TimeUnit.HOURS, ""), + DELEGATION_TOKEN_MAX_LIFETIME("metastore.cluster.delegation.token.max-lifetime", + "hive.cluster.delegation.token.max-lifetime", 7, TimeUnit.DAYS, ""), + DELEGATION_TOKEN_RENEW_INTERVAL("metastore.cluster.delegation.token.renew-interval", + "hive.cluster.delegation.token.renew-interval", 1, TimeUnit.DAYS, ""), + DELEGATION_TOKEN_STORE_CLS("metastore.cluster.delegation.token.store.class", + "hive.cluster.delegation.token.store.class", "", "Class to store delegation tokens"), DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", "javax.jdo.option.DetachAllOnCommit", true, "Detaches all objects from session so that they can be used after transaction is committed"), diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java new file mode 100644 index 000000000000..ba6c7e3f3425 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.security; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; + +/** + * A delegation token identifier that is specific to Hive. + */ +public class DelegationTokenIdentifier + extends AbstractDelegationTokenIdentifier { + public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN"); + + /** + * Create an empty delegation token identifier for reading into. + */ + public DelegationTokenIdentifier() { + } + + /** + * Create a new delegation token identifier + * @param owner the effective username of the token owner + * @param renewer the username of the renewer + * @param realUser the real username of the token owner + */ + public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) { + super(owner, renewer, realUser); + } + + @Override + public Text getKind() { + return HIVE_DELEGATION_KIND; + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java new file mode 100644 index 000000000000..aae96a5dfedf --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.security; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; + +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; + +/** + * A Hive specific delegation token secret manager. + * The secret manager is responsible for generating and accepting the password + * for each token. + */ +public class DelegationTokenSecretManager + extends AbstractDelegationTokenSecretManager { + + /** + * Create a secret manager + * @param delegationKeyUpdateInterval the number of seconds for rolling new + * secret keys. 
+ * @param delegationTokenMaxLifetime the maximum lifetime of the delegation + * tokens + * @param delegationTokenRenewInterval how often the tokens must be renewed + * @param delegationTokenRemoverScanInterval how often the tokens are scanned + * for expired tokens + */ + public DelegationTokenSecretManager(long delegationKeyUpdateInterval, + long delegationTokenMaxLifetime, + long delegationTokenRenewInterval, + long delegationTokenRemoverScanInterval) { + super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, + delegationTokenRenewInterval, delegationTokenRemoverScanInterval); + } + + @Override + public DelegationTokenIdentifier createIdentifier() { + return new DelegationTokenIdentifier(); + } + + /** + * Verify token string + * @param tokenStrForm + * @return user name + * @throws IOException + */ + public synchronized String verifyDelegationToken(String tokenStrForm) throws IOException { + Token t = new Token<>(); + t.decodeFromUrlString(tokenStrForm); + + DelegationTokenIdentifier id = getTokenIdentifier(t); + verifyToken(id, t.getPassword()); + return id.getUser().getShortUserName(); + } + + protected DelegationTokenIdentifier getTokenIdentifier(Token token) + throws IOException { + // turn bytes back into identifier for cache lookup + ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); + DataInputStream in = new DataInputStream(buf); + DelegationTokenIdentifier id = createIdentifier(); + id.readFields(in); + return id; + } + + public synchronized void cancelDelegationToken(String tokenStrForm) throws IOException { + Token t= new Token<>(); + t.decodeFromUrlString(tokenStrForm); + String user = UserGroupInformation.getCurrentUser().getUserName(); + cancelToken(t, user); + } + + public synchronized long renewDelegationToken(String tokenStrForm) throws IOException { + Token t= new Token<>(); + t.decodeFromUrlString(tokenStrForm); + String user = UserGroupInformation.getCurrentUser().getUserName(); + return renewToken(t, user); + } + + public synchronized String getDelegationToken(String renewer) throws IOException { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + Text owner = new Text(ugi.getUserName()); + Text realUser = null; + if (ugi.getRealUser() != null) { + realUser = new Text(ugi.getRealUser().getUserName()); + } + DelegationTokenIdentifier ident = + new DelegationTokenIdentifier(owner, new Text(renewer), realUser); + Token t = new Token<>( + ident, this); + return t.encodeToUrlString(); + } + + public String getUserFromToken(String tokenStr) throws IOException { + Token delegationToken = new Token<>(); + delegationToken.decodeFromUrlString(tokenStr); + + ByteArrayInputStream buf = new ByteArrayInputStream(delegationToken.getIdentifier()); + DataInputStream in = new DataInputStream(buf); + DelegationTokenIdentifier id = createIdentifier(); + id.readFields(in); + return id.getUser().getShortUserName(); + } +} + diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java new file mode 100644 index 000000000000..0cafeff89614 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.security; + +import java.io.Closeable; +import java.util.List; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge.Server.ServerMode; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation; + +/** + * Interface for pluggable token store that can be implemented with shared external + * storage for load balancing and high availability (for example using ZooKeeper). + * Internal, store specific errors are translated into {@link TokenStoreException}. + */ +public interface DelegationTokenStore extends Configurable, Closeable { + + /** + * Exception for internal token store errors that typically cannot be handled by the caller. + */ + class TokenStoreException extends RuntimeException { + private static final long serialVersionUID = -8693819817623074083L; + + public TokenStoreException(Throwable cause) { + super(cause); + } + + public TokenStoreException(String message, Throwable cause) { + super(message, cause); + } + } + + /** + * Add new master key. The token store assigns and returns the sequence number. + * Caller needs to use the identifier to update the key (since it is embedded in the key). + * + * @param s + * @return sequence number for new key + */ + int addMasterKey(String s) throws TokenStoreException; + + /** + * Update master key (for expiration and setting store assigned sequence within key) + * @param keySeq + * @param s + * @throws TokenStoreException + */ + void updateMasterKey(int keySeq, String s) throws TokenStoreException; + + /** + * Remove key for given id. + * @param keySeq + * @return false if key no longer present, true otherwise. + */ + boolean removeMasterKey(int keySeq); + + /** + * Return all master keys. + * @return + * @throws TokenStoreException + */ + String[] getMasterKeys() throws TokenStoreException; + + /** + * Add token. If identifier is already present, token won't be added. + * @param tokenIdentifier + * @param token + * @return true if token was added, false for existing identifier + */ + boolean addToken(DelegationTokenIdentifier tokenIdentifier, + DelegationTokenInformation token) throws TokenStoreException; + + /** + * Get token. Returns null if the token does not exist. + * @param tokenIdentifier + * @return + */ + DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) + throws TokenStoreException; + + /** + * Remove token. Return value can be used by caller to detect concurrency. + * @param tokenIdentifier + * @return true if token was removed, false if it was already removed. + * @throws TokenStoreException + */ + boolean removeToken(DelegationTokenIdentifier tokenIdentifier) throws TokenStoreException; + + /** + * List of all token identifiers in the store. 
This is used to remove expired tokens + * and a potential scalability improvement would be to partition by master key id + * @return + */ + List getAllDelegationTokenIdentifiers() throws TokenStoreException; + + /** + * @param hmsHandler ObjectStore used by DBTokenStore + * @param smode Indicate whether this is a metastore or hiveserver2 token store + */ + void init(Object hmsHandler, ServerMode smode); + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java new file mode 100644 index 000000000000..c2a73eb52ea9 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java @@ -0,0 +1,668 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.security; + +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.security.PrivilegedAction; +import java.security.PrivilegedExceptionAction; +import java.util.Locale; +import java.util.Map; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.sasl.AuthorizeCallback; +import javax.security.sasl.RealmCallback; +import javax.security.sasl.RealmChoiceCallback; +import javax.security.sasl.SaslException; +import javax.security.sasl.SaslServer; + +import org.apache.commons.codec.binary.Base64; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.thrift.TException; +import org.apache.thrift.TProcessor; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TSaslClientTransport; +import org.apache.thrift.transport.TSaslServerTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import 
org.apache.thrift.transport.TTransportException; +import org.apache.thrift.transport.TTransportFactory; + +/** + * Functions that bridge Thrift's SASL transports to Hadoop's + * SASL callback handlers and authentication classes. + * HIVE-11378 This class is not directly used anymore. It now exists only as a shell to be + * extended by HadoopThriftAuthBridge23 in 0.23 shims. I have made it abstract + * to avoid maintenance errors. + */ +public abstract class HadoopThriftAuthBridge { + private static final Logger LOG = LoggerFactory.getLogger(HadoopThriftAuthBridge.class); + + // We want to have only one auth bridge. In the past this was handled by ShimLoader, but since + // we're no longer using that we'll do it here. + private static HadoopThriftAuthBridge self = null; + + public static HadoopThriftAuthBridge getBridge() { + if (self == null) { + synchronized (HadoopThriftAuthBridge.class) { + if (self == null) self = new HadoopThriftAuthBridge23(); + } + } + return self; + } + + public Client createClient() { + return new Client(); + } + + public Client createClientWithConf(String authMethod) { + UserGroupInformation ugi; + try { + ugi = UserGroupInformation.getLoginUser(); + } catch(IOException e) { + throw new IllegalStateException("Unable to get current login user: " + e, e); + } + if (loginUserHasCurrentAuthMethod(ugi, authMethod)) { + LOG.debug("Not setting UGI conf as passed-in authMethod of " + authMethod + " = current."); + return new Client(); + } else { + LOG.debug("Setting UGI conf as passed-in authMethod of " + authMethod + " != current."); + Configuration conf = new Configuration(); + conf.set(HADOOP_SECURITY_AUTHENTICATION, authMethod); + UserGroupInformation.setConfiguration(conf); + return new Client(); + } + } + + public Server createServer(String keytabFile, String principalConf) throws TTransportException { + return new Server(keytabFile, principalConf); + } + + + public String getServerPrincipal(String principalConfig, String host) + throws IOException { + String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host); + String names[] = SaslRpcServer.splitKerberosName(serverPrincipal); + if (names.length != 3) { + throw new IOException( + "Kerberos principal name does NOT have the expected hostname part: " + + serverPrincipal); + } + return serverPrincipal; + } + + + public UserGroupInformation getCurrentUGIWithConf(String authMethod) + throws IOException { + UserGroupInformation ugi; + try { + ugi = UserGroupInformation.getCurrentUser(); + } catch(IOException e) { + throw new IllegalStateException("Unable to get current user: " + e, e); + } + if (loginUserHasCurrentAuthMethod(ugi, authMethod)) { + LOG.debug("Not setting UGI conf as passed-in authMethod of " + authMethod + " = current."); + return ugi; + } else { + LOG.debug("Setting UGI conf as passed-in authMethod of " + authMethod + " != current."); + Configuration conf = new Configuration(); + conf.set(HADOOP_SECURITY_AUTHENTICATION, authMethod); + UserGroupInformation.setConfiguration(conf); + return UserGroupInformation.getCurrentUser(); + } + } + + /** + * Return true if the current login user is already using the given authMethod. + * + * Used above to ensure we do not create a new Configuration object and as such + * lose other settings such as the cluster to which the JVM is connected. 
Required + * for oozie since it does not have a core-site.xml see HIVE-7682 + */ + private boolean loginUserHasCurrentAuthMethod(UserGroupInformation ugi, String sAuthMethod) { + AuthenticationMethod authMethod; + try { + // based on SecurityUtil.getAuthenticationMethod() + authMethod = Enum.valueOf(AuthenticationMethod.class, sAuthMethod.toUpperCase(Locale.ENGLISH)); + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("Invalid attribute value for " + + HADOOP_SECURITY_AUTHENTICATION + " of " + sAuthMethod, iae); + } + LOG.debug("Current authMethod = " + ugi.getAuthenticationMethod()); + return ugi.getAuthenticationMethod().equals(authMethod); + } + + + /** + * Read and return Hadoop SASL configuration which can be configured using + * "hadoop.rpc.protection" + * @param conf + * @return Hadoop SASL configuration + */ + + public abstract Map getHadoopSaslProperties(Configuration conf); + + public static class Client { + /** + * Create a client-side SASL transport that wraps an underlying transport. + * + * @param methodStr The authentication method to use. Currently only KERBEROS is + * supported. + * @param principalConfig The Kerberos principal of the target server. + * @param underlyingTransport The underlying transport mechanism, usually a TSocket. + * @param saslProps the sasl properties to create the client with + */ + + + public TTransport createClientTransport( + String principalConfig, String host, + String methodStr, String tokenStrForm, final TTransport underlyingTransport, + final Map saslProps) throws IOException { + final AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr); + + TTransport saslTransport = null; + switch (method) { + case DIGEST: + Token t= new Token<>(); + t.decodeFromUrlString(tokenStrForm); + saslTransport = new TSaslClientTransport( + method.getMechanismName(), + null, + null, SaslRpcServer.SASL_DEFAULT_REALM, + saslProps, new SaslClientCallbackHandler(t), + underlyingTransport); + return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser()); + + case KERBEROS: + String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host); + final String names[] = SaslRpcServer.splitKerberosName(serverPrincipal); + if (names.length != 3) { + throw new IOException( + "Kerberos principal name does NOT have the expected hostname part: " + + serverPrincipal); + } + try { + return UserGroupInformation.getCurrentUser().doAs( + new PrivilegedExceptionAction() { + @Override + public TUGIAssumingTransport run() throws IOException { + TTransport saslTransport = new TSaslClientTransport( + method.getMechanismName(), + null, + names[0], names[1], + saslProps, null, + underlyingTransport); + return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser()); + } + }); + } catch (InterruptedException | SaslException se) { + throw new IOException("Could not instantiate SASL transport", se); + } + + default: + throw new IOException("Unsupported authentication method: " + method); + } + } + private static class SaslClientCallbackHandler implements CallbackHandler { + private final String userName; + private final char[] userPassword; + + public SaslClientCallbackHandler(Token token) { + this.userName = encodeIdentifier(token.getIdentifier()); + this.userPassword = encodePassword(token.getPassword()); + } + + + @Override + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException { + NameCallback nc = null; + PasswordCallback pc = null; + RealmCallback rc = null; 
+ for (Callback callback : callbacks) { + if (callback instanceof RealmChoiceCallback) { + continue; + } else if (callback instanceof NameCallback) { + nc = (NameCallback) callback; + } else if (callback instanceof PasswordCallback) { + pc = (PasswordCallback) callback; + } else if (callback instanceof RealmCallback) { + rc = (RealmCallback) callback; + } else { + throw new UnsupportedCallbackException(callback, + "Unrecognized SASL client callback"); + } + } + if (nc != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("SASL client callback: setting username: " + userName); + } + nc.setName(userName); + } + if (pc != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("SASL client callback: setting userPassword"); + } + pc.setPassword(userPassword); + } + if (rc != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("SASL client callback: setting realm: " + + rc.getDefaultText()); + } + rc.setText(rc.getDefaultText()); + } + } + + static String encodeIdentifier(byte[] identifier) { + return new String(Base64.encodeBase64(identifier)); + } + + static char[] encodePassword(byte[] password) { + return new String(Base64.encodeBase64(password)).toCharArray(); + } + } + } + + public static class Server { + public enum ServerMode { + HIVESERVER2, METASTORE + }; + + protected final UserGroupInformation realUgi; + protected DelegationTokenSecretManager secretManager; + + public Server() throws TTransportException { + try { + realUgi = UserGroupInformation.getCurrentUser(); + } catch (IOException ioe) { + throw new TTransportException(ioe); + } + } + /** + * Create a server with a kerberos keytab/principal. + */ + protected Server(String keytabFile, String principalConf) + throws TTransportException { + if (keytabFile == null || keytabFile.isEmpty()) { + throw new TTransportException("No keytab specified"); + } + if (principalConf == null || principalConf.isEmpty()) { + throw new TTransportException("No principal specified"); + } + + // Login from the keytab + String kerberosName; + try { + kerberosName = + SecurityUtil.getServerPrincipal(principalConf, "0.0.0.0"); + UserGroupInformation.loginUserFromKeytab( + kerberosName, keytabFile); + realUgi = UserGroupInformation.getLoginUser(); + assert realUgi.isFromKeytab(); + } catch (IOException ioe) { + throw new TTransportException(ioe); + } + } + + public void setSecretManager(DelegationTokenSecretManager secretManager) { + this.secretManager = secretManager; + } + + /** + * Create a TTransportFactory that, upon connection of a client socket, + * negotiates a Kerberized SASL transport. The resulting TTransportFactory + * can be passed as both the input and output transport factory when + * instantiating a TThreadPoolServer, for example. + * + * @param saslProps Map of SASL properties + */ + + public TTransportFactory createTransportFactory(Map saslProps) + throws TTransportException { + + TSaslServerTransport.Factory transFactory = createSaslServerTransportFactory(saslProps); + + return new TUGIAssumingTransportFactory(transFactory, realUgi); + } + + /** + * Create a TSaslServerTransport.Factory that, upon connection of a client + * socket, negotiates a Kerberized SASL transport. + * + * @param saslProps Map of SASL properties + */ + public TSaslServerTransport.Factory createSaslServerTransportFactory( + Map saslProps) throws TTransportException { + // Parse out the kerberos principal, host, realm. 
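+      // The login principal is expected to have the primary/instance@REALM form; its
+      // first two parts are registered for the KERBEROS (GSSAPI) mechanism, and a
+      // DIGEST-MD5 definition is added alongside it so delegation tokens can be
+      // verified through the secret manager.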
+ String kerberosName = realUgi.getUserName(); + final String names[] = SaslRpcServer.splitKerberosName(kerberosName); + if (names.length != 3) { + throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName); + } + + TSaslServerTransport.Factory transFactory = new TSaslServerTransport.Factory(); + transFactory.addServerDefinition( + AuthMethod.KERBEROS.getMechanismName(), + names[0], names[1], // two parts of kerberos principal + saslProps, + new SaslRpcServer.SaslGssCallbackHandler()); + transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(), + null, SaslRpcServer.SASL_DEFAULT_REALM, + saslProps, new SaslDigestCallbackHandler(secretManager)); + + return transFactory; + } + + /** + * Wrap a TTransportFactory in such a way that, before processing any RPC, it + * assumes the UserGroupInformation of the user authenticated by + * the SASL transport. + */ + public TTransportFactory wrapTransportFactory(TTransportFactory transFactory) { + return new TUGIAssumingTransportFactory(transFactory, realUgi); + } + + /** + * Wrap a TProcessor in such a way that, before processing any RPC, it + * assumes the UserGroupInformation of the user authenticated by + * the SASL transport. + */ + + public TProcessor wrapProcessor(TProcessor processor) { + return new TUGIAssumingProcessor(processor, secretManager, true); + } + + /** + * Wrap a TProcessor to capture the client information like connecting userid, ip etc + */ + + public TProcessor wrapNonAssumingProcessor(TProcessor processor) { + return new TUGIAssumingProcessor(processor, secretManager, false); + } + + final static ThreadLocal remoteAddress = + new ThreadLocal() { + + @Override + protected InetAddress initialValue() { + return null; + } + }; + + public InetAddress getRemoteAddress() { + return remoteAddress.get(); + } + + final static ThreadLocal authenticationMethod = + new ThreadLocal() { + + @Override + protected AuthenticationMethod initialValue() { + return AuthenticationMethod.TOKEN; + } + }; + + private static ThreadLocal remoteUser = new ThreadLocal () { + + @Override + protected String initialValue() { + return null; + } + }; + + + public String getRemoteUser() { + return remoteUser.get(); + } + + private final static ThreadLocal userAuthMechanism = + new ThreadLocal() { + + @Override + protected String initialValue() { + return AuthMethod.KERBEROS.getMechanismName(); + } + }; + + public String getUserAuthMechanism() { + return userAuthMechanism.get(); + } + /** CallbackHandler for SASL DIGEST-MD5 mechanism */ + // This code is pretty much completely based on Hadoop's + // SaslRpcServer.SaslDigestCallbackHandler - the only reason we could not + // use that Hadoop class as-is was because it needs a Server.Connection object + // which is relevant in hadoop rpc but not here in the metastore - so the + // code below does not deal with the Connection Server.object. 
+ static class SaslDigestCallbackHandler implements CallbackHandler { + private final DelegationTokenSecretManager secretManager; + + public SaslDigestCallbackHandler( + DelegationTokenSecretManager secretManager) { + this.secretManager = secretManager; + } + + private char[] getPassword(DelegationTokenIdentifier tokenid) throws InvalidToken { + return encodePassword(secretManager.retrievePassword(tokenid)); + } + + private char[] encodePassword(byte[] password) { + return new String(Base64.encodeBase64(password)).toCharArray(); + } + /** {@inheritDoc} */ + + @Override + public void handle(Callback[] callbacks) throws InvalidToken, + UnsupportedCallbackException { + NameCallback nc = null; + PasswordCallback pc = null; + AuthorizeCallback ac = null; + for (Callback callback : callbacks) { + if (callback instanceof AuthorizeCallback) { + ac = (AuthorizeCallback) callback; + } else if (callback instanceof NameCallback) { + nc = (NameCallback) callback; + } else if (callback instanceof PasswordCallback) { + pc = (PasswordCallback) callback; + } else if (callback instanceof RealmCallback) { + continue; // realm is ignored + } else { + throw new UnsupportedCallbackException(callback, + "Unrecognized SASL DIGEST-MD5 Callback"); + } + } + if (pc != null) { + DelegationTokenIdentifier tokenIdentifier = SaslRpcServer. + getIdentifier(nc.getDefaultName(), secretManager); + char[] password = getPassword(tokenIdentifier); + + if (LOG.isDebugEnabled()) { + LOG.debug("SASL server DIGEST-MD5 callback: setting password " + + "for client: " + tokenIdentifier.getUser()); + } + pc.setPassword(password); + } + if (ac != null) { + String authid = ac.getAuthenticationID(); + String authzid = ac.getAuthorizationID(); + if (authid.equals(authzid)) { + ac.setAuthorized(true); + } else { + ac.setAuthorized(false); + } + if (ac.isAuthorized()) { + if (LOG.isDebugEnabled()) { + String username = + SaslRpcServer.getIdentifier(authzid, secretManager).getUser().getUserName(); + LOG.debug("SASL server DIGEST-MD5 callback: setting " + + "canonicalized client ID: " + username); + } + ac.setAuthorizedID(authzid); + } + } + } + } + + /** + * Processor that pulls the SaslServer object out of the transport, and + * assumes the remote user's UGI before calling through to the original + * processor. + * + * This is used on the server side to set the UGI for each specific call. 
+ */ + protected class TUGIAssumingProcessor implements TProcessor { + final TProcessor wrapped; + DelegationTokenSecretManager secretManager; + boolean useProxy; + TUGIAssumingProcessor(TProcessor wrapped, DelegationTokenSecretManager secretManager, + boolean useProxy) { + this.wrapped = wrapped; + this.secretManager = secretManager; + this.useProxy = useProxy; + } + + + @Override + public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException { + TTransport trans = inProt.getTransport(); + if (!(trans instanceof TSaslServerTransport)) { + throw new TException("Unexpected non-SASL transport " + trans.getClass()); + } + TSaslServerTransport saslTrans = (TSaslServerTransport)trans; + SaslServer saslServer = saslTrans.getSaslServer(); + String authId = saslServer.getAuthorizationID(); + LOG.debug("AUTH ID ======>" + authId); + String endUser = authId; + + Socket socket = ((TSocket)(saslTrans.getUnderlyingTransport())).getSocket(); + remoteAddress.set(socket.getInetAddress()); + + String mechanismName = saslServer.getMechanismName(); + userAuthMechanism.set(mechanismName); + if (AuthMethod.PLAIN.getMechanismName().equalsIgnoreCase(mechanismName)) { + remoteUser.set(endUser); + return wrapped.process(inProt, outProt); + } + + authenticationMethod.set(AuthenticationMethod.KERBEROS); + if(AuthMethod.TOKEN.getMechanismName().equalsIgnoreCase(mechanismName)) { + try { + TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authId, + secretManager); + endUser = tokenId.getUser().getUserName(); + authenticationMethod.set(AuthenticationMethod.TOKEN); + } catch (InvalidToken e) { + throw new TException(e.getMessage()); + } + } + + UserGroupInformation clientUgi = null; + try { + if (useProxy) { + clientUgi = UserGroupInformation.createProxyUser( + endUser, UserGroupInformation.getLoginUser()); + remoteUser.set(clientUgi.getShortUserName()); + LOG.debug("Set remoteUser :" + remoteUser.get()); + return clientUgi.doAs(new PrivilegedExceptionAction() { + + @Override + public Boolean run() { + try { + return wrapped.process(inProt, outProt); + } catch (TException te) { + throw new RuntimeException(te); + } + } + }); + } else { + // use the short user name for the request + UserGroupInformation endUserUgi = UserGroupInformation.createRemoteUser(endUser); + remoteUser.set(endUserUgi.getShortUserName()); + LOG.debug("Set remoteUser :" + remoteUser.get() + ", from endUser :" + endUser); + return wrapped.process(inProt, outProt); + } + } catch (RuntimeException rte) { + if (rte.getCause() instanceof TException) { + throw (TException)rte.getCause(); + } + throw rte; + } catch (InterruptedException ie) { + throw new RuntimeException(ie); // unexpected! + } catch (IOException ioe) { + throw new RuntimeException(ioe); // unexpected! + } + finally { + if (clientUgi != null) { + try { FileSystem.closeAllForUGI(clientUgi); } + catch(IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, exception); + } + } + } + } + } + + /** + * A TransportFactory that wraps another one, but assumes a specified UGI + * before calling through. + * + * This is used on the server side to assume the server's Principal when accepting + * clients. 
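+   * A server typically obtains this indirectly via Server#createTransportFactory,
+   * passing the result as both the input and output transport factory of its Thrift
+   * server. A rough, illustrative sketch (local variable names here are hypothetical):
+   * <pre>
+   *   HadoopThriftAuthBridge.Server saslServer =
+   *       HadoopThriftAuthBridge.getBridge().createServer(keytabFile, principalConf);
+   *   saslServer.setSecretManager(secretManager);
+   *   TTransportFactory transFactory = saslServer.createTransportFactory(saslProps);
+   *   TProcessor processor = saslServer.wrapProcessor(baseProcessor);
+   * </pre>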
+ */ + static class TUGIAssumingTransportFactory extends TTransportFactory { + private final UserGroupInformation ugi; + private final TTransportFactory wrapped; + + public TUGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) { + assert wrapped != null; + assert ugi != null; + this.wrapped = wrapped; + this.ugi = ugi; + } + + + @Override + public TTransport getTransport(final TTransport trans) { + return ugi.doAs(new PrivilegedAction() { + @Override + public TTransport run() { + return wrapped.getTransport(trans); + } + }); + } + } + } +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java new file mode 100644 index 000000000000..dc765359d144 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.security; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.Map; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.SaslRpcServer; + +/** + * Functions that bridge Thrift's SASL transports to Hadoop's SASL callback + * handlers and authentication classes. + * + * This is a 0.23/2.x specific implementation + */ +public class HadoopThriftAuthBridge23 extends HadoopThriftAuthBridge { + + private static Field SASL_PROPS_FIELD; + private static Class SASL_PROPERTIES_RESOLVER_CLASS; + private static Method RES_GET_INSTANCE_METHOD; + private static Method GET_DEFAULT_PROP_METHOD; + static { + SASL_PROPERTIES_RESOLVER_CLASS = null; + SASL_PROPS_FIELD = null; + final String SASL_PROP_RES_CLASSNAME = "org.apache.hadoop.security.SaslPropertiesResolver"; + try { + SASL_PROPERTIES_RESOLVER_CLASS = Class.forName(SASL_PROP_RES_CLASSNAME); + + } catch (ClassNotFoundException e) { + } + + if (SASL_PROPERTIES_RESOLVER_CLASS != null) { + // found the class, so this would be hadoop version 2.4 or newer (See + // HADOOP-10221, HADOOP-10451) + try { + RES_GET_INSTANCE_METHOD = SASL_PROPERTIES_RESOLVER_CLASS.getMethod("getInstance", + Configuration.class); + GET_DEFAULT_PROP_METHOD = SASL_PROPERTIES_RESOLVER_CLASS.getMethod("getDefaultProperties"); + } catch (Exception e) { + // this must be hadoop 2.4 , where getDefaultProperties was protected + } + } + + if (SASL_PROPERTIES_RESOLVER_CLASS == null || GET_DEFAULT_PROP_METHOD == null) { + // this must be a hadoop 2.4 version or earlier. 
+ // Resorting to the earlier method of getting the properties, which uses SASL_PROPS field + try { + SASL_PROPS_FIELD = SaslRpcServer.class.getField("SASL_PROPS"); + } catch (NoSuchFieldException e) { + // Older version of hadoop should have had this field + throw new IllegalStateException("Error finding hadoop SASL_PROPS field in " + + SaslRpcServer.class.getSimpleName(), e); + } + } + } + + // TODO RIVEN switch this back to package level when we can move TestHadoopAuthBridge23 into + // riven. + // Package permission so that HadoopThriftAuthBridge can construct it but others cannot. + protected HadoopThriftAuthBridge23() { + + } + + /** + * Read and return Hadoop SASL configuration which can be configured using + * "hadoop.rpc.protection" + * + * @param conf + * @return Hadoop SASL configuration + */ + @SuppressWarnings("unchecked") + @Override + public Map getHadoopSaslProperties(Configuration conf) { + if (SASL_PROPS_FIELD != null) { + // hadoop 2.4 and earlier way of finding the sasl property settings + // Initialize the SaslRpcServer to ensure QOP parameters are read from + // conf + SaslRpcServer.init(conf); + try { + return (Map) SASL_PROPS_FIELD.get(null); + } catch (Exception e) { + throw new IllegalStateException("Error finding hadoop SASL properties", e); + } + } + // 2.5 and later way of finding sasl property + try { + Configurable saslPropertiesResolver = (Configurable) RES_GET_INSTANCE_METHOD.invoke(null, + conf); + saslPropertiesResolver.setConf(conf); + return (Map) GET_DEFAULT_PROP_METHOD.invoke(saslPropertiesResolver); + } catch (Exception e) { + throw new IllegalStateException("Error finding hadoop SASL properties", e); + } + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java new file mode 100644 index 000000000000..c484cd3132d0 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.security; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge.Server.ServerMode; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Default in-memory token store implementation. 
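+ * It is what MetastoreDelegationTokenManager falls back to when
+ * metastore.cluster.delegation.token.store.class is left unset.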
+ */ +public class MemoryTokenStore implements DelegationTokenStore { + private static final Logger LOG = LoggerFactory.getLogger(MemoryTokenStore.class); + + private final Map masterKeys = new ConcurrentHashMap<>(); + + private final ConcurrentHashMap tokens + = new ConcurrentHashMap<>(); + + private final AtomicInteger masterKeySeq = new AtomicInteger(); + private Configuration conf; + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return this.conf; + } + + @Override + public int addMasterKey(String s) { + int keySeq = masterKeySeq.getAndIncrement(); + if (LOG.isTraceEnabled()) { + LOG.trace("addMasterKey: s = " + s + ", keySeq = " + keySeq); + } + masterKeys.put(keySeq, s); + return keySeq; + } + + @Override + public void updateMasterKey(int keySeq, String s) { + if (LOG.isTraceEnabled()) { + LOG.trace("updateMasterKey: s = " + s + ", keySeq = " + keySeq); + } + masterKeys.put(keySeq, s); + } + + @Override + public boolean removeMasterKey(int keySeq) { + if (LOG.isTraceEnabled()) { + LOG.trace("removeMasterKey: keySeq = " + keySeq); + } + return masterKeys.remove(keySeq) != null; + } + + @Override + public String[] getMasterKeys() { + return masterKeys.values().toArray(new String[0]); + } + + @Override + public boolean addToken(DelegationTokenIdentifier tokenIdentifier, + DelegationTokenInformation token) { + DelegationTokenInformation tokenInfo = tokens.putIfAbsent(tokenIdentifier, token); + if (LOG.isTraceEnabled()) { + LOG.trace("addToken: tokenIdentifier = " + tokenIdentifier + ", added = " + (tokenInfo == null)); + } + return (tokenInfo == null); + } + + @Override + public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) { + DelegationTokenInformation tokenInfo = tokens.remove(tokenIdentifier); + if (LOG.isTraceEnabled()) { + LOG.trace("removeToken: tokenIdentifier = " + tokenIdentifier + ", removed = " + (tokenInfo != null)); + } + return tokenInfo != null; + } + + @Override + public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) { + DelegationTokenInformation result = tokens.get(tokenIdentifier); + if (LOG.isTraceEnabled()) { + LOG.trace("getToken: tokenIdentifier = " + tokenIdentifier + ", result = " + result); + } + return result; + } + + @Override + public List getAllDelegationTokenIdentifiers() { + List result = new ArrayList<>( + tokens.size()); + for (DelegationTokenIdentifier id : tokens.keySet()) { + result.add(id); + } + return result; + } + + @Override + public void close() throws IOException { + //no-op + } + + @Override + public void init(Object hmsHandler, ServerMode smode) throws TokenStoreException { + // no-op + } +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java new file mode 100644 index 000000000000..a5a1c0f8bec7 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.hive.metastore.security; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.ReflectionUtils; + +public class MetastoreDelegationTokenManager { + + protected DelegationTokenSecretManager secretManager; + + public MetastoreDelegationTokenManager() { + } + + public DelegationTokenSecretManager getSecretManager() { + return secretManager; + } + + public void startDelegationTokenSecretManager(Configuration conf, Object hms, HadoopThriftAuthBridge.Server.ServerMode smode) + throws IOException { + long secretKeyInterval = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.DELEGATION_KEY_UPDATE_INTERVAL, TimeUnit.MILLISECONDS); + long tokenMaxLifetime = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.DELEGATION_TOKEN_MAX_LIFETIME, TimeUnit.MILLISECONDS); + long tokenRenewInterval = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.DELEGATION_TOKEN_RENEW_INTERVAL, TimeUnit.MILLISECONDS); + long tokenGcInterval = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.DELEGATION_TOKEN_GC_INTERVAL, TimeUnit.MILLISECONDS); + + DelegationTokenStore dts = getTokenStore(conf); + dts.setConf(conf); + dts.init(hms, smode); + secretManager = + new TokenStoreDelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, + tokenRenewInterval, tokenGcInterval, dts); + secretManager.startThreads(); + } + + public String getDelegationToken(final String owner, final String renewer, String remoteAddr) + throws IOException, + InterruptedException { + /* + * If the user asking the token is same as the 'owner' then don't do + * any proxy authorization checks. For cases like oozie, where it gets + * a delegation token for another user, we need to make sure oozie is + * authorized to get a delegation token. + */ + // Do all checks on short names + UserGroupInformation currUser = UserGroupInformation.getCurrentUser(); + UserGroupInformation ownerUgi = UserGroupInformation.createRemoteUser(owner); + if (!ownerUgi.getShortUserName().equals(currUser.getShortUserName())) { + // in the case of proxy users, the getCurrentUser will return the + // real user (for e.g. 
oozie) due to the doAs that happened just before the + // server started executing the method getDelegationToken in the MetaStore + ownerUgi = UserGroupInformation.createProxyUser(owner, UserGroupInformation.getCurrentUser()); + ProxyUsers.authorize(ownerUgi, remoteAddr, null); + } + return ownerUgi.doAs(new PrivilegedExceptionAction() { + + @Override + public String run() throws IOException { + return secretManager.getDelegationToken(renewer); + } + }); + } + + public String getDelegationTokenWithService(String owner, String renewer, String service, String remoteAddr) + throws IOException, InterruptedException { + String token = getDelegationToken(owner, renewer, remoteAddr); + return addServiceToToken(token, service); + } + + public long renewDelegationToken(String tokenStrForm) + throws IOException { + return secretManager.renewDelegationToken(tokenStrForm); + } + + public String getUserFromToken(String tokenStr) throws IOException { + return secretManager.getUserFromToken(tokenStr); + } + + public void cancelDelegationToken(String tokenStrForm) throws IOException { + secretManager.cancelDelegationToken(tokenStrForm); + } + + /** + * Verify token string + * @param tokenStrForm + * @return user name + * @throws IOException + */ + public String verifyDelegationToken(String tokenStrForm) throws IOException { + return secretManager.verifyDelegationToken(tokenStrForm); + } + + private DelegationTokenStore getTokenStore(Configuration conf) throws IOException { + String tokenStoreClassName = + MetastoreConf.getVar(conf, MetastoreConf.ConfVars.DELEGATION_TOKEN_STORE_CLS, ""); + if (StringUtils.isBlank(tokenStoreClassName)) { + return new MemoryTokenStore(); + } + try { + Class storeClass = + Class.forName(tokenStoreClassName).asSubclass(DelegationTokenStore.class); + return ReflectionUtils.newInstance(storeClass, conf); + } catch (ClassNotFoundException e) { + throw new IOException("Error initializing delegation token store: " + tokenStoreClassName, e); + } + } + + /** + * Add a given service to delegation token string. + * @param tokenStr + * @param tokenService + * @return + * @throws IOException + */ + public static String addServiceToToken(String tokenStr, String tokenService) + throws IOException { + Token delegationToken = createToken(tokenStr, tokenService); + return delegationToken.encodeToUrlString(); + } + + /** + * Create a new token using the given string and service + * @param tokenStr + * @param tokenService + * @return + * @throws IOException + */ + private static Token createToken(String tokenStr, String tokenService) + throws IOException { + Token delegationToken = new Token<>(); + delegationToken.decodeFromUrlString(tokenStr); + delegationToken.setService(new Text(tokenService)); + return delegationToken; + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java new file mode 100644 index 000000000000..79d317a63641 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.security; + +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +/** + * Transport that simply wraps another transport. + * This is the equivalent of FilterInputStream for Thrift transports. + */ + public class TFilterTransport extends TTransport { + protected final TTransport wrapped; + + public TFilterTransport(TTransport wrapped) { + this.wrapped = wrapped; + } + + @Override + public void open() throws TTransportException { + wrapped.open(); + } + + @Override + public boolean isOpen() { + return wrapped.isOpen(); + } + + @Override + public boolean peek() { + return wrapped.peek(); + } + + @Override + public void close() { + wrapped.close(); + } + + @Override + public int read(byte[] buf, int off, int len) throws TTransportException { + return wrapped.read(buf, off, len); + } + + @Override + public int readAll(byte[] buf, int off, int len) throws TTransportException { + return wrapped.readAll(buf, off, len); + } + + @Override + public void write(byte[] buf) throws TTransportException { + wrapped.write(buf); + } + + @Override + public void write(byte[] buf, int off, int len) throws TTransportException { + wrapped.write(buf, off, len); + } + + @Override + public void flush() throws TTransportException { + wrapped.flush(); + } + + @Override + public byte[] getBuffer() { + return wrapped.getBuffer(); + } + + @Override + public int getBufferPosition() { + return wrapped.getBufferPosition(); + } + + @Override + public int getBytesRemainingInBuffer() { + return wrapped.getBytesRemainingInBuffer(); + } + + @Override + public void consumeBuffer(int len) { + wrapped.consumeBuffer(len); + } + } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java new file mode 100644 index 000000000000..38a946e2d2f8 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.security; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +/** + * The Thrift SASL transports call Sasl.createSaslServer and Sasl.createSaslClient + * inside open(). So, we need to assume the correct UGI when the transport is opened + * so that the SASL mechanisms have access to the right principal. This transport + * wraps the Sasl transports to set up the right UGI context for open(). + * + * This is used on the client side, where the API explicitly opens a transport to + * the server. + */ + public class TUGIAssumingTransport extends TFilterTransport { + protected UserGroupInformation ugi; + + public TUGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) { + super(wrapped); + this.ugi = ugi; + } + + @Override + public void open() throws TTransportException { + try { + ugi.doAs(new PrivilegedExceptionAction() { + public Void run() { + try { + wrapped.open(); + } catch (TTransportException tte) { + // Wrap the transport exception in an RTE, since UGI.doAs() then goes + // and unwraps this for us out of the doAs block. We then unwrap one + // more time in our catch clause to get back the TTE. (ugh) + throw new RuntimeException(tte); + } + return null; + } + }); + } catch (IOException ioe) { + throw new RuntimeException("Received an ioe we never threw!", ioe); + } catch (InterruptedException ie) { + throw new RuntimeException("Received an ie we never threw!", ie); + } catch (RuntimeException rte) { + if (rte.getCause() instanceof TTransportException) { + throw (TTransportException)rte.getCause(); + } else { + throw rte; + } + } + } + } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java new file mode 100644 index 000000000000..acfe94988a94 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.security; + +import java.net.Socket; +import java.util.concurrent.ConcurrentMap; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportFactory; + +import com.google.common.collect.MapMaker; + +/** TUGIContainingTransport associates ugi information with connection (transport). 
+ * Wraps underlying TSocket transport and annotates it with ugi. +*/ + +public class TUGIContainingTransport extends TFilterTransport { + + private UserGroupInformation ugi; + + public TUGIContainingTransport(TTransport wrapped) { + super(wrapped); + } + + public UserGroupInformation getClientUGI(){ + return ugi; + } + + public void setClientUGI(UserGroupInformation ugi){ + this.ugi = ugi; + } + + /** + * If the underlying TTransport is an instance of TSocket, it returns the Socket object + * which it contains. Otherwise it returns null. + */ + public Socket getSocket() { + if (wrapped instanceof TSocket) { + return (((TSocket)wrapped).getSocket()); + } + + return null; + } + + /** Factory to create TUGIContainingTransport. + */ + + public static class Factory extends TTransportFactory { + + // Need a concurrent weakhashmap. WeakKeys() so that when underlying transport gets out of + // scope, it still can be GC'ed. Since value of map has a ref to key, need weekValues as well. + private static final ConcurrentMap transMap = + new MapMaker().weakKeys().weakValues().makeMap(); + + /** + * Get a new TUGIContainingTransport instance, or reuse the + * existing one if a TUGIContainingTransport has already been + * created before using the given TTransport as an underlying + * transport. This ensures that a given underlying transport instance + * receives the same TUGIContainingTransport. + */ + @Override + public TUGIContainingTransport getTransport(TTransport trans) { + + // UGI information is not available at connection setup time, it will be set later + // via set_ugi() rpc. + TUGIContainingTransport tugiTrans = transMap.get(trans); + if (tugiTrans == null) { + tugiTrans = new TUGIContainingTransport(trans); + TUGIContainingTransport prev = transMap.putIfAbsent(trans, tugiTrans); + if (prev != null) { + return prev; + } + } + return tugiTrans; + } + } +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java new file mode 100644 index 000000000000..4abcec789476 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java @@ -0,0 +1,333 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.security; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.security.token.delegation.MetastoreDelegationTokenSupport; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extension of {@link DelegationTokenSecretManager} to support alternative to default in-memory + * token management for fail-over and clustering through plug-able token store (ZooKeeper etc.). + * Delegation tokens will be retrieved from the store on-demand and (unlike base class behavior) not + * cached in memory. This avoids complexities related to token expiration. The security token is + * needed only at the time the transport is opened (as opposed to per interface operation). The + * assumption therefore is low cost of interprocess token retrieval (for random read efficient store + * such as ZooKeeper) compared to overhead of synchronizing per-process in-memory token caches. + * The wrapper incorporates the token store abstraction within the limitations of current + * Hive/Hadoop dependency (.20S) with minimum code duplication. + * Eventually this should be supported by Hadoop security directly. 
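+ *
+ * A rough sketch of how MetastoreDelegationTokenManager wires this up (the interval
+ * values come from the metastore.cluster.delegation.* settings; variable names here
+ * are illustrative only):
+ * <pre>
+ *   DelegationTokenStore store = new MemoryTokenStore();
+ *   store.setConf(conf);
+ *   store.init(hmsHandler, ServerMode.METASTORE);
+ *   DelegationTokenSecretManager mgr = new TokenStoreDelegationTokenSecretManager(
+ *       keyUpdateInterval, tokenMaxLifetime, tokenRenewInterval, tokenGcInterval, store);
+ *   mgr.startThreads();
+ * </pre>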
+ */
+public class TokenStoreDelegationTokenSecretManager extends DelegationTokenSecretManager {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(TokenStoreDelegationTokenSecretManager.class.getName());
+
+  final private long keyUpdateInterval;
+  final private long tokenRemoverScanInterval;
+  private Thread tokenRemoverThread;
+
+  final private DelegationTokenStore tokenStore;
+
+  public TokenStoreDelegationTokenSecretManager(long delegationKeyUpdateInterval,
+      long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
+      long delegationTokenRemoverScanInterval,
+      DelegationTokenStore sharedStore) {
+    super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval,
+        delegationTokenRemoverScanInterval);
+    this.keyUpdateInterval = delegationKeyUpdateInterval;
+    this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval;
+
+    this.tokenStore = sharedStore;
+  }
+
+  protected Map<Integer, DelegationKey> reloadKeys() {
+    // read keys from token store
+    String[] allKeys = tokenStore.getMasterKeys();
+    Map<Integer, DelegationKey> keys = new HashMap<>(allKeys.length);
+    for (String keyStr : allKeys) {
+      DelegationKey key = new DelegationKey();
+      try {
+        decodeWritable(key, keyStr);
+        keys.put(key.getKeyId(), key);
+      } catch (IOException ex) {
+        LOGGER.error("Failed to load master key.", ex);
+      }
+    }
+    synchronized (this) {
+      super.allKeys.clear();
+      super.allKeys.putAll(keys);
+    }
+    return keys;
+  }
+
+  @Override
+  public byte[] retrievePassword(DelegationTokenIdentifier identifier) throws InvalidToken {
+    DelegationTokenInformation info = this.tokenStore.getToken(identifier);
+    if (info == null) {
+      throw new InvalidToken("token expired or does not exist: " + identifier);
+    }
+    // must reuse super as info.getPassword is not accessible
+    synchronized (this) {
+      try {
+        super.currentTokens.put(identifier, info);
+        return super.retrievePassword(identifier);
+      } finally {
+        super.currentTokens.remove(identifier);
+      }
+    }
+  }
+
+  @Override
+  public DelegationTokenIdentifier cancelToken(Token<DelegationTokenIdentifier> token,
+      String canceller) throws IOException {
+    DelegationTokenIdentifier id = getTokenIdentifier(token);
+    LOGGER.info("Token cancellation requested for identifier: " + id);
+    this.tokenStore.removeToken(id);
+    return id;
+  }
+
+  /**
+   * Create the password and add it to the shared store.
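+   * <p>
+   * This hook is invoked by the Hadoop machinery whenever a new delegation token is minted
+   * (for example via the {@code Token} constructor), so the shared store sees every issued
+   * token. Illustrative sketch only; the identifier fields and the {@code secretManager}
+   * reference are placeholders, not actual metastore wiring:
+   * <pre>{@code
+   *   DelegationTokenIdentifier id = new DelegationTokenIdentifier(
+   *       new Text("owner"), new Text("renewer"), new Text("owner"));
+   *   Token<DelegationTokenIdentifier> token = new Token<>(id, secretManager);
+   *   // The Token constructor calls secretManager.createPassword(id), which persists the
+   *   // password and expiration into the shared DelegationTokenStore.
+   * }</pre>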
+   */
+  @Override
+  protected byte[] createPassword(DelegationTokenIdentifier id) {
+    byte[] password;
+    DelegationTokenInformation info;
+    synchronized (this) {
+      password = super.createPassword(id);
+      // add new token to shared store
+      // need to persist expiration along with password
+      info = super.currentTokens.remove(id);
+      if (info == null) {
+        throw new IllegalStateException("Failed to retrieve token after creation");
+      }
+    }
+    this.tokenStore.addToken(id, info);
+    return password;
+  }
+
+  @Override
+  public long renewToken(Token<DelegationTokenIdentifier> token,
+      String renewer) throws IOException {
+    // since renewal is KERBEROS authenticated, the token may not be cached
+    final DelegationTokenIdentifier id = getTokenIdentifier(token);
+    DelegationTokenInformation tokenInfo = this.tokenStore.getToken(id);
+    if (tokenInfo == null) {
+      throw new InvalidToken("token does not exist: " + id); // no token found
+    }
+    // ensure associated master key is available
+    if (!super.allKeys.containsKey(id.getMasterKeyId())) {
+      LOGGER.info("Unknown master key (id={}), (re)loading keys from token store.",
+          id.getMasterKeyId());
+      reloadKeys();
+    }
+    // reuse super renewal logic
+    synchronized (this) {
+      super.currentTokens.put(id, tokenInfo);
+      try {
+        return super.renewToken(token, renewer);
+      } finally {
+        super.currentTokens.remove(id);
+      }
+    }
+  }
+
+  public static String encodeWritable(Writable key) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(bos);
+    key.write(dos);
+    dos.flush();
+    return Base64.encodeBase64URLSafeString(bos.toByteArray());
+  }
+
+  public static void decodeWritable(Writable w, String idStr) throws IOException {
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(Base64.decodeBase64(idStr)));
+    w.readFields(in);
+  }
+
+  /**
+   * Synchronize master key updates / sequence generation for multiple nodes.
+   * NOTE: {@link AbstractDelegationTokenSecretManager} keeps currentKey private, so we need
+   * to utilize this "hook" to manipulate the key through the object reference.
+   * This .20S workaround should cease to exist when Hadoop supports a token store.
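+   * <p>
+   * Master keys travel through the store as Base64-encoded Writables via the
+   * {@code encodeWritable} / {@code decodeWritable} helpers above; a round-trip looks like the
+   * following sketch (the key id, expiry and key bytes are illustrative values):
+   * <pre>{@code
+   *   DelegationKey key = new DelegationKey(1, System.currentTimeMillis() + 86400000L,
+   *       new byte[] {0x1, 0x2, 0x3});
+   *   String encoded = encodeWritable(key);   // URL-safe Base64 string, safe to persist
+   *   DelegationKey copy = new DelegationKey();
+   *   decodeWritable(copy, encoded);          // copy now carries the same id, expiry and key
+   * }</pre>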
+   */
+  @Override
+  protected void logUpdateMasterKey(DelegationKey key) throws IOException {
+    int keySeq = this.tokenStore.addMasterKey(encodeWritable(key));
+    // update key with assigned identifier
+    DelegationKey keyWithSeq = new DelegationKey(keySeq, key.getExpiryDate(), key.getKey());
+    String keyStr = encodeWritable(keyWithSeq);
+    this.tokenStore.updateMasterKey(keySeq, keyStr);
+    decodeWritable(key, keyStr);
+    LOGGER.info("New master key with key id={}", key.getKeyId());
+    super.logUpdateMasterKey(key);
+  }
+
+  @Override
+  public synchronized void startThreads() throws IOException {
+    try {
+      // updateCurrentKey needs to be called to initialize the master key
+      // (there should be a null check added in the future in rollMasterKey)
+      // updateCurrentKey();
+      Method m = AbstractDelegationTokenSecretManager.class.getDeclaredMethod("updateCurrentKey");
+      m.setAccessible(true);
+      m.invoke(this);
+    } catch (Exception e) {
+      throw new IOException("Failed to initialize master key", e);
+    }
+    running = true;
+    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
+    tokenRemoverThread.start();
+  }
+
+  @Override
+  public synchronized void stopThreads() {
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug("Stopping expired delegation token remover thread");
+    }
+    running = false;
+    if (tokenRemoverThread != null) {
+      tokenRemoverThread.interrupt();
+    }
+  }
+
+  /**
+   * Remove expired tokens. Replaces logic in {@link AbstractDelegationTokenSecretManager}
+   * that cannot be reused due to private method access. Logic here can more efficiently
+   * deal with an external token store by only loading into memory the minimum data needed.
+   */
+  protected void removeExpiredTokens() {
+    long now = System.currentTimeMillis();
+    Iterator<DelegationTokenIdentifier> i = tokenStore.getAllDelegationTokenIdentifiers()
+        .iterator();
+    while (i.hasNext()) {
+      DelegationTokenIdentifier id = i.next();
+      if (now > id.getMaxDate()) {
+        this.tokenStore.removeToken(id); // no need to look at token info
+      } else {
+        // get token info to check renew date
+        DelegationTokenInformation tokenInfo = tokenStore.getToken(id);
+        if (tokenInfo != null) {
+          if (now > tokenInfo.getRenewDate()) {
+            this.tokenStore.removeToken(id);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Extension of rollMasterKey to remove expired keys from the store.
+   *
+   * @throws IOException
+   */
+  protected void rollMasterKeyExt() throws IOException {
+    Map<Integer, DelegationKey> keys = reloadKeys();
+    int currentKeyId = super.currentId;
+    MetastoreDelegationTokenSupport.rollMasterKey(TokenStoreDelegationTokenSecretManager.this);
+    List<DelegationKey> keysAfterRoll = Arrays.asList(getAllKeys());
+    for (DelegationKey key : keysAfterRoll) {
+      keys.remove(key.getKeyId());
+      if (key.getKeyId() == currentKeyId) {
+        tokenStore.updateMasterKey(currentKeyId, encodeWritable(key));
+      }
+    }
+    for (DelegationKey expiredKey : keys.values()) {
+      LOGGER.info("Removing expired key id={}", expiredKey.getKeyId());
+      try {
+        tokenStore.removeMasterKey(expiredKey.getKeyId());
+      } catch (Exception e) {
+        LOGGER.error("Error removing expired key id={}", expiredKey.getKeyId(), e);
+      }
+    }
+  }
+
+  /**
+   * Cloned from {@link AbstractDelegationTokenSecretManager} to deal with private access
+   * restriction (there would not be a need to clone the remove thread if the remove logic was
+   * protected/extensible).
+ */ + protected class ExpiredTokenRemover extends Thread { + private long lastMasterKeyUpdate; + private long lastTokenCacheCleanup; + + @Override + public void run() { + LOGGER.info("Starting expired delegation token remover thread, " + + "tokenRemoverScanInterval=" + tokenRemoverScanInterval + / (60 * 1000) + " min(s)"); + while (running) { + try { + long now = System.currentTimeMillis(); + if (lastMasterKeyUpdate + keyUpdateInterval < now) { + try { + rollMasterKeyExt(); + lastMasterKeyUpdate = now; + } catch (IOException e) { + LOGGER.error("Master key updating failed. " + + StringUtils.stringifyException(e)); + } + } + if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) { + removeExpiredTokens(); + lastTokenCacheCleanup = now; + } + try { + Thread.sleep(5000); // 5 seconds + } catch (InterruptedException ie) { + LOGGER + .error("InterruptedException received for ExpiredTokenRemover thread " + + ie); + } + } catch (Throwable t) { + LOGGER.error("ExpiredTokenRemover thread received unexpected exception. " + + t, t); + // Wait 5 seconds too in case of an exception, so we do not end up in busy waiting for + // the solution for this exception + try { + Thread.sleep(5000); // 5 seconds + } catch (InterruptedException ie) { + LOGGER.error("InterruptedException received for ExpiredTokenRemover thread during " + + "wait in exception sleep " + ie); + } + } + } + } + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java new file mode 100644 index 000000000000..7588c9f119b1 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.login.LoginException; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; + +public class HdfsUtils { + private static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class); + + /** + * Check the permissions on a file. + * @param fs Filesystem the file is contained in + * @param stat Stat info for the file + * @param action action to be performed + * @throws IOException If thrown by Hadoop + * @throws AccessControlException if the file cannot be accessed + */ + public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action) + throws IOException, LoginException { + checkFileAccess(fs, stat, action, SecurityUtils.getUGI()); + } + + /** + * Check the permissions on a file + * @param fs Filesystem the file is contained in + * @param stat Stat info for the file + * @param action action to be performed + * @param ugi user group info for the current user. This is passed in so that tests can pass + * in mock ones. + * @throws IOException If thrown by Hadoop + * @throws AccessControlException if the file cannot be accessed + */ + @VisibleForTesting + static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action, + UserGroupInformation ugi) throws IOException { + + String user = ugi.getShortUserName(); + String[] groups = ugi.getGroupNames(); + + if (groups != null) { + String superGroupName = fs.getConf().get("dfs.permissions.supergroup", ""); + if (arrayContains(groups, superGroupName)) { + LOG.debug("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". 
" + + "Permission granted for action: " + action + "."); + return; + } + } + + FsPermission dirPerms = stat.getPermission(); + + if (user.equals(stat.getOwner())) { + if (dirPerms.getUserAction().implies(action)) { + return; + } + } else if (arrayContains(groups, stat.getGroup())) { + if (dirPerms.getGroupAction().implies(action)) { + return; + } + } else if (dirPerms.getOtherAction().implies(action)) { + return; + } + throw new AccessControlException("action " + action + " not permitted on path " + + stat.getPath() + " for user " + user); + } + + public static boolean isPathEncrypted(Configuration conf, URI fsUri, Path path) + throws IOException { + Path fullPath; + if (path.isAbsolute()) { + fullPath = path; + } else { + fullPath = path.getFileSystem(conf).makeQualified(path); + } + if(!"hdfs".equalsIgnoreCase(path.toUri().getScheme())) { + return false; + } + try { + HdfsAdmin hdfsAdmin = new HdfsAdmin(fsUri, conf); + return (hdfsAdmin.getEncryptionZoneForPath(fullPath) != null); + } catch (FileNotFoundException fnfe) { + LOG.debug("Failed to get EZ for non-existent path: "+ fullPath, fnfe); + return false; + } + } + + private static boolean arrayContains(String[] array, String value) { + if (array == null) return false; + for (String element : array) { + if (element.equals(value)) return true; + } + return false; + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java new file mode 100644 index 000000000000..9f0ca829b1e3 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import org.apache.hadoop.security.UserGroupInformation; + +import javax.security.auth.login.LoginException; +import java.io.IOException; + +public class SecurityUtils { + public static UserGroupInformation getUGI() throws LoginException, IOException { + String doAs = System.getenv("HADOOP_USER_NAME"); + if (doAs != null && doAs.length() > 0) { + /* + * this allows doAs (proxy user) to be passed along across process boundary where + * delegation tokens are not supported. For example, a DDL stmt via WebHCat with + * a doAs parameter, forks to 'hcat' which needs to start a Session that + * proxies the end user + */ + return UserGroupInformation.createProxyUser(doAs, UserGroupInformation.getLoginUser()); + } + return UserGroupInformation.getCurrentUser(); + } + +} diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java b/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java new file mode 100644 index 000000000000..3206681bc714 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.token.delegation; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation; + +/** + * Workaround for serialization of {@link DelegationTokenInformation} through package access. + * Future version of Hadoop should add this to DelegationTokenInformation itself. 
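+ * <p>
+ * A typical round-trip through these helpers looks like the following sketch (the renew date
+ * and password bytes are illustrative values):
+ * <pre>{@code
+ *   DelegationTokenInformation info =
+ *       new DelegationTokenInformation(System.currentTimeMillis() + 3600000L,
+ *           new byte[] {0x1, 0x2, 0x3});
+ *   byte[] bytes = MetastoreDelegationTokenSupport.encodeDelegationTokenInformation(info);
+ *   DelegationTokenInformation copy =
+ *       MetastoreDelegationTokenSupport.decodeDelegationTokenInformation(bytes);
+ *   // copy carries the same password and renew date as info
+ * }</pre>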
+ */ +public final class MetastoreDelegationTokenSupport { + + private MetastoreDelegationTokenSupport() {} + + public static byte[] encodeDelegationTokenInformation(DelegationTokenInformation token) { + try { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(bos); + WritableUtils.writeVInt(out, token.password.length); + out.write(token.password); + out.writeLong(token.renewDate); + out.flush(); + return bos.toByteArray(); + } catch (IOException ex) { + throw new RuntimeException("Failed to encode token.", ex); + } + } + + public static DelegationTokenInformation decodeDelegationTokenInformation(byte[] tokenBytes) + throws IOException { + DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenBytes)); + DelegationTokenInformation token = new DelegationTokenInformation(0, null); + int len = WritableUtils.readVInt(in); + token.password = new byte[len]; + in.readFully(token.password); + token.renewDate = in.readLong(); + return token; + } + + public static void rollMasterKey( + AbstractDelegationTokenSecretManager mgr) + throws IOException { + mgr.rollMasterKey(); + } + +} diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java new file mode 100644 index 000000000000..b5f37ebd7481 --- /dev/null +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.Test; +import org.mockito.Mockito; + +import javax.security.auth.login.LoginException; +import java.io.IOException; +import java.util.Random; + +public class TestHdfsUtils { + + private Random rand = new Random(); + + private Path createFile(FileSystem fs, FsPermission perms) throws IOException { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < 10; i++) { + buf.append((char)(rand.nextInt(26) + 'a')); + } + Path p = new Path(buf.toString()); + FSDataOutputStream os = fs.create(p); + os.writeBytes("Mary had a little lamb\nit's fleece was white as snow\nand anywhere that Mary " + + "went\nthe lamb was sure to go\n"); + os.close(); + fs.setPermission(p, perms); + fs.deleteOnExit(p); + return p; + } + + private Configuration makeConf() { + // Make sure that the user doesn't happen to be in the super group + Configuration conf = new Configuration(); + conf.set("dfs.permissions.supergroup", "ubermensch"); + return conf; + } + + private UserGroupInformation ugiInvalidUserValidGroups() throws LoginException, IOException { + UserGroupInformation ugi = Mockito.mock(UserGroupInformation.class); + Mockito.when(ugi.getShortUserName()).thenReturn("nosuchuser"); + Mockito.when(ugi.getGroupNames()).thenReturn(SecurityUtils.getUGI().getGroupNames()); + return ugi; + } + + private UserGroupInformation ugiInvalidUserInvalidGroups() { + UserGroupInformation ugi = Mockito.mock(UserGroupInformation.class); + Mockito.when(ugi.getShortUserName()).thenReturn("nosuchuser"); + Mockito.when(ugi.getGroupNames()).thenReturn(new String[]{"nosuchgroup"}); + return ugi; + } + + @Test + public void userReadWriteExecute() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); + UserGroupInformation ugi = SecurityUtils.getUGI(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test(expected = AccessControlException.class) + public void userNoRead() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL)); + UserGroupInformation ugi = SecurityUtils.getUGI(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + } + + @Test(expected = AccessControlException.class) + public void userNoWrite() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL)); + UserGroupInformation ugi = 
SecurityUtils.getUGI(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + } + + @Test(expected = AccessControlException.class) + public void userNoExecute() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL)); + UserGroupInformation ugi = SecurityUtils.getUGI(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test + public void groupReadWriteExecute() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.NONE)); + UserGroupInformation ugi = ugiInvalidUserValidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test(expected = AccessControlException.class) + public void groupNoRead() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL)); + UserGroupInformation ugi = ugiInvalidUserValidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + } + + @Test(expected = AccessControlException.class) + public void groupNoWrite() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL)); + UserGroupInformation ugi = ugiInvalidUserValidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + } + + @Test(expected = AccessControlException.class) + public void groupNoExecute() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL)); + UserGroupInformation ugi = ugiInvalidUserValidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test + public void otherReadWriteExecute() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.ALL)); + UserGroupInformation ugi = ugiInvalidUserInvalidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test(expected = AccessControlException.class) + public void otherNoRead() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE)); + UserGroupInformation ugi = ugiInvalidUserInvalidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + } + + @Test(expected = AccessControlException.class) + public void otherNoWrite() throws IOException, LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE)); + UserGroupInformation ugi = ugiInvalidUserInvalidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + } + + @Test(expected = AccessControlException.class) + public void otherNoExecute() throws IOException, 
LoginException { + FileSystem fs = FileSystem.get(makeConf()); + Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE)); + UserGroupInformation ugi = ugiInvalidUserInvalidGroups(); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } + + @Test + public void rootReadWriteExecute() throws IOException, LoginException { + UserGroupInformation ugi = SecurityUtils.getUGI(); + FileSystem fs = FileSystem.get(new Configuration()); + String old = fs.getConf().get("dfs.permissions.supergroup"); + try { + fs.getConf().set("dfs.permissions.supergroup", ugi.getPrimaryGroupName()); + Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE)); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi); + HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi); + } finally { + fs.getConf().set("dfs.permissions.supergroup", old); + } + } + +} From 185018be52c92098f8b4a33d8d76409ecc01947f Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Mon, 14 Aug 2017 16:04:15 -0700 Subject: [PATCH 2/3] Fixed broken tests TestSSLWithMiniKdc and TestJdbcWithMiniHS2. --- .../apache/hadoop/hive/metastore/conf/MetastoreConf.java | 4 +++- .../security/MetastoreDelegationTokenManager.java | 8 +++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 6e2a4624c693..0fb878a2671c 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager; import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -370,7 +371,8 @@ public enum ConfVars { DELEGATION_TOKEN_RENEW_INTERVAL("metastore.cluster.delegation.token.renew-interval", "hive.cluster.delegation.token.renew-interval", 1, TimeUnit.DAYS, ""), DELEGATION_TOKEN_STORE_CLS("metastore.cluster.delegation.token.store.class", - "hive.cluster.delegation.token.store.class", "", "Class to store delegation tokens"), + "hive.cluster.delegation.token.store.class", MetastoreDelegationTokenManager.class.getName(), + "Class to store delegation tokens"), DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", "javax.jdo.option.DetachAllOnCommit", true, "Detaches all objects from session so that they can be used after transaction is committed"), diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java index a5a1c0f8bec7..2b0110fe0bad 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java @@ -123,7 +123,13 @@ public String verifyDelegationToken(String tokenStrForm) throws IOException { private DelegationTokenStore getTokenStore(Configuration 
conf) throws IOException { String tokenStoreClassName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.DELEGATION_TOKEN_STORE_CLS, ""); - if (StringUtils.isBlank(tokenStoreClassName)) { + // The second half of this if is to catch cases where users are passing in a HiveConf for + // configuration. It will have set the default value of + // "hive.cluster.delegation.token.store .class" to + // "org.apache.hadoop.hive.thrift.MemoryTokenStore" as part of its construction. But this is + // the hive-shims version of the memory store. We want to convert this to our default value. + if (StringUtils.isBlank(tokenStoreClassName) || + "org.apache.hadoop.hive.thrift.MemoryTokenStore".equals(tokenStoreClassName)) { return new MemoryTokenStore(); } try { From 9d34d63f599c303d8340e3887af1a4c500f9c42d Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Fri, 18 Aug 2017 15:40:41 -0700 Subject: [PATCH 3/3] Fixes based on changes in HiveMetaStoreClient. --- .../hive/metastore/HiveMetaStoreClient.java | 2 +- .../security/HadoopThriftAuthBridge.java | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index fb0f72a80fe3..b1efb8c4226e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -194,7 +194,7 @@ public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader, Boolean metastoreUris[i++] = new URI( tmpUri.getScheme(), tmpUri.getUserInfo(), - ShimLoader.getHadoopThriftAuthBridge().getCanonicalHostName(tmpUri.getHost()), + HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()), tmpUri.getPort(), tmpUri.getPath(), tmpUri.getQuery(), diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java index c2a73eb52ea9..3f02ffdf1091 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.Socket; +import java.net.UnknownHostException; import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.Locale; @@ -124,6 +125,21 @@ public String getServerPrincipal(String principalConfig, String host) return serverPrincipal; } + /** + * Method to get canonical-ized hostname, given a hostname (possibly a CNAME). + * This should allow for service-principals to use simplified CNAMEs. + * @param hostName The hostname to be canonical-ized. + * @return Given a CNAME, the canonical-ized hostname is returned. If not found, the original hostname is returned. + */ + public String getCanonicalHostName(String hostName) { + try { + return InetAddress.getByName(hostName).getCanonicalHostName(); + } + catch(UnknownHostException exception) { + LOG.warn("Could not retrieve canonical hostname for " + hostName, exception); + return hostName; + } + } public UserGroupInformation getCurrentUGIWithConf(String authMethod) throws IOException {