Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HIVE-24470: Separate HiveMetaStore Thrift and Driver logic #1787

Merged
merged 8 commits on Feb 2, 2021 (source and target branch names not captured in this page extract)
Expand Up @@ -39,7 +39,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.MetaStoreEventListenerConstants;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.RawStoreProxy;
Expand Down
Expand Up @@ -55,7 +55,7 @@ public class TestMetastoreVersion {
@Before
public void setUp() throws Exception {

Field defDb = HiveMetaStore.HMSHandler.class.getDeclaredField("currentUrl");
Field defDb = HMSHandler.class.getDeclaredField("currentUrl");
defDb.setAccessible(true);
defDb.set(null, null);
// reset defaults
Expand Down Expand Up @@ -229,7 +229,7 @@ private void setVersion(HiveConf conf, String version) throws Exception {

// Load the version stored in the metastore db
public String getMetaStoreVersion() throws HiveMetaException, MetaException {
RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(hiveConf);
RawStore ms = HMSHandler.getMSForConf(hiveConf);
try {
return ms.getMetaStoreSchemaVersion();
} catch (MetaException e) {
Expand All @@ -240,7 +240,7 @@ public String getMetaStoreVersion() throws HiveMetaException, MetaException {
// Store the given version and comment in the metastore
public void setMetaStoreVersion(String newVersion, String comment)
throws HiveMetaException, MetaException {
RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(hiveConf);
RawStore ms = HMSHandler.getMSForConf(hiveConf);
try {
ms.setMetaStoreSchemaVersion(newVersion, comment);
} catch (MetaException e) {
Expand Down
Expand Up @@ -33,7 +33,7 @@ public class TestPartitionExpressionProxyDefault {
@Test
public void checkPartitionExpressionProxy() throws MetaException {
Configuration conf = MetastoreConf.newMetastoreConf();
HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true);
HMSHandler hms = new HMSHandler("for testing", conf, true);
Assert.assertEquals(PartitionExpressionForMetastore.class,
hms.getExpressionProxy().getClass());
}
Expand Down
Expand Up @@ -47,7 +47,7 @@ public class TestCachedStoreUpdateUsingEvents {
private RawStore rawStore;
private SharedCache sharedCache;
private Configuration conf;
private HiveMetaStore.HMSHandler hmsHandler;
private HMSHandler hmsHandler;
private String[] colType = new String[] {"double", "string"};

@Before
Expand All @@ -67,7 +67,7 @@ public void setUp() throws Exception {

TestTxnDbUtil.prepDb(conf);

hmsHandler = new HiveMetaStore.HMSHandler("testCachedStore", conf, true);
hmsHandler = new HMSHandler("testCachedStore", conf, true);

rawStore = new ObjectStore();
rawStore.setConf(hmsHandler.getConf());
Expand All @@ -81,7 +81,7 @@ public void setUp() throws Exception {
CachedStore.stopCacheUpdateService(1);

// Create the 'hive' catalog with new warehouse directory
HiveMetaStore.HMSHandler.createDefaultCatalog(rawStore, new Warehouse(conf));
HMSHandler.createDefaultCatalog(rawStore, new Warehouse(conf));
}

private Database createTestDb(String dbName, String dbOwner) throws IOException {
Expand Down
Expand Up @@ -23,7 +23,7 @@



import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.security.DBTokenStore;
Expand Down
Expand Up @@ -34,7 +34,7 @@

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.apache.hive.service.cli.HiveSQLException;
Expand Down Expand Up @@ -289,7 +289,7 @@ private void executeQueryExceedPartitionLimit(String query, int expectedPartitio
fail("The query should have failed, because the number of requested partitions are bigger than "
+ PARTITION_REQUEST_LIMIT);
} catch (HiveSQLException e) {
String exceedLimitMsg = String.format(HiveMetaStore.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber,
String exceedLimitMsg = String.format(HMSHandler.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber,
TABLE_NAME, PARTITION_REQUEST_LIMIT, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST.toString());
assertTrue(getWrongExceptionMessage(exceedLimitMsg, e.getMessage()),
e.getMessage().contains(exceedLimitMsg.toString()));
Expand Down
Expand Up @@ -22,7 +22,7 @@
import com.google.common.collect.Lists;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
import org.apache.hadoop.hive.metastore.api.MetaException;
Expand Down Expand Up @@ -110,7 +110,7 @@ private void createDumpFileForTable(Context withinContext, org.apache.hadoop.hiv

private List<WriteEventInfo> getAllWriteEventInfo(Context withinContext) throws Exception {
String contextDbName = StringUtils.normalizeIdentifier(withinContext.replScope.getDbName());
RawStore rawStore = HiveMetaStore.HMSHandler.getMSForConf(withinContext.hiveConf);
RawStore rawStore = HMSHandler.getMSForConf(withinContext.hiveConf);
List<WriteEventInfo> writeEventInfoList
= rawStore.getAllWriteEventInfo(eventMessage.getTxnId(), contextDbName, null);
return ((writeEventInfoList == null)
Expand Down
Expand Up @@ -27,8 +27,7 @@

import javax.security.auth.login.LoginException;

import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
Expand Down Expand Up @@ -180,7 +179,7 @@ public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] wri
private static boolean userHasProxyPrivilege(String user, Configuration conf) {
try {
if (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(user, conf,
HiveMetaStore.HMSHandler.getIPAddress())) {
HMSHandler.getIPAddress())) {
LOG.info("user {} has host proxy privilege.", user);
return true;
}
Expand Down
Expand Up @@ -24,14 +24,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreFilterHook;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
Expand Down Expand Up @@ -483,7 +482,7 @@ HiveAuthorizer createHiveMetaStoreAuthorizer() throws Exception {

boolean isSuperUser(String userName) {
Configuration conf = getConf();
String ipAddress = HiveMetaStore.HMSHandler.getIPAddress();
String ipAddress = HMSHandler.getIPAddress();
return (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(userName, conf, ipAddress));
}

Expand Down
Expand Up @@ -20,7 +20,7 @@
package org.apache.hadoop.hive.ql.security.authorization.plugin.metastore;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.events.PreEventContext;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
Expand Down Expand Up @@ -104,7 +104,7 @@ private HiveAuthzContext createHiveAuthzContext() {
// TODO: refer to SessionManager/HiveSessionImpl for details on getting ipAddress and forwardedAddresses
builder.setForwardedAddresses(new ArrayList<>());

String ipAddress = HiveMetaStore.HMSHandler.getIPAddress();
String ipAddress = HMSHandler.getIPAddress();

builder.setUserIpAddress(ipAddress);

Expand Down
Expand Up @@ -31,7 +31,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
Expand Down Expand Up @@ -121,7 +121,7 @@ private List<HiveRoleGrant> getRolesFromMS() throws HiveAuthzPluginException {
getAllRoleAncestors(name2Rolesmap, roles);
List<HiveRoleGrant> currentRoles = new ArrayList<HiveRoleGrant>(roles.size());
for (HiveRoleGrant role : name2Rolesmap.values()) {
if (!HiveMetaStore.ADMIN.equalsIgnoreCase(role.getRoleName())) {
if (!HMSHandler.ADMIN.equalsIgnoreCase(role.getRoleName())) {
currentRoles.add(role);
} else {
this.adminRole = role;
Expand Down Expand Up @@ -541,7 +541,7 @@ public void setCurrentRole(String roleName) throws HiveAccessControlException,
}
}
// set to ADMIN role, if user belongs there.
if (HiveMetaStore.ADMIN.equalsIgnoreCase(roleName) && null != this.adminRole) {
if (HMSHandler.ADMIN.equalsIgnoreCase(roleName) && null != this.adminRole) {
currentRoles.clear();
currentRoles.add(adminRole);
return;
Expand All @@ -565,7 +565,7 @@ boolean isUserAdmin() throws HiveAuthzPluginException {
List<HiveRoleGrant> roles;
roles = getCurrentRoles();
for (HiveRoleGrant role : roles) {
if (role.getRoleName().equalsIgnoreCase(HiveMetaStore.ADMIN)) {
if (role.getRoleName().equalsIgnoreCase(HMSHandler.ADMIN)) {
return true;
}
}
Expand Down
Expand Up @@ -61,7 +61,7 @@
import static org.apache.hadoop.hive.conf.Constants.COMPACTOR_CLEANER_THREAD_NAME_FORMAT;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RETENTION_TIME;
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED;
import static org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler.getMSForConf;
import static org.apache.hadoop.hive.metastore.HMSHandler.getMSForConf;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;

/**
Expand Down
Expand Up @@ -33,7 +33,7 @@
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler.getMSForConf;
import static org.apache.hadoop.hive.metastore.HMSHandler.getMSForConf;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;

/**
Expand Down
Expand Up @@ -62,7 +62,7 @@ public class TestHiveMetaStoreAuthorizer {
private static final String TEST_DATA_DIR = new File("file:///testdata").getPath();
private RawStore rawStore;
private Configuration conf;
private HiveMetaStore.HMSHandler hmsHandler;
private HMSHandler hmsHandler;

@Before
public void setUp() throws Exception {
Expand All @@ -82,11 +82,11 @@ public void setUp() throws Exception {

MetaStoreTestUtils.setConfForStandloneMode(conf);

hmsHandler = new HiveMetaStore.HMSHandler("test", conf, true);
hmsHandler = new HMSHandler("test", conf, true);
rawStore = new ObjectStore();
rawStore.setConf(hmsHandler.getConf());
// Create the 'hive' catalog with new warehouse directory
HiveMetaStore.HMSHandler.createDefaultCatalog(rawStore, new Warehouse(conf));
HMSHandler.createDefaultCatalog(rawStore, new Warehouse(conf));
try {
hmsHandler.drop_table(dbName, tblName, true);
hmsHandler.drop_database(dbName, true, false);
Expand Down
Expand Up @@ -21,7 +21,7 @@

import java.util.Map;

import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HMSHandler;
import org.apache.hadoop.hive.metastore.RawStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -67,7 +67,7 @@ private void cleanRawStore() {
*/
public void cacheThreadLocalRawStore() {
Long threadId = this.getId();
RawStore threadLocalRawStore = HiveMetaStore.HMSHandler.getRawStore();
RawStore threadLocalRawStore = HMSHandler.getRawStore();
if (threadLocalRawStore == null) {
LOG.debug("Thread Local RawStore is null, for the thread: " +
this.getName() + " and so removing entry from threadRawStoreMap.");
Expand Down