Skip to content

Commit

Permalink
HIVE-2936 : Warehouse table subdirectories should inherit the group p…
Browse files Browse the repository at this point in the history
…ermissions of the warehouse parent directory (Rohini via Ashutosh Chauhan)

git-svn-id: https://svn.apache.org/repos/asf/hive/trunk@1325791 13f79535-47bb-0310-9956-ffa450edef68
  • Loading branch information
ashutoshc committed Apr 13, 2012
1 parent dc0a962 commit cafdcf0
Show file tree
Hide file tree
Showing 5 changed files with 52 additions and 13 deletions.
2 changes: 1 addition & 1 deletion common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
Expand Up @@ -574,7 +574,7 @@ public static enum ConfVars {
// Whether to delete the scratchdir while startup
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false),
HIVE_FILES_UMASK_VALUE("hive.files.umask.value", 0002),
HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", false),

// parameters for using multiple clusters in a hive instance
HIVE_USE_INPUT_PRIMARY_REGION("hive.use.input.primary.region", true),
Expand Down
8 changes: 5 additions & 3 deletions conf/hive-default.xml.template
Expand Up @@ -1240,9 +1240,11 @@
</property>

<property>
<name>hive.files.umask.value</name>
<value>0002</value>
<description>The dfs.umask value for the hive created folders</description>
<name>hive.warehouse.subdir.inherit.perms</name>
<value>false</value>
<description>Set this to true if the table directories should inherit the
permission of the warehouse or database directory instead of being created
with the permissions derived from dfs umask</description>
</property>

<property>
Expand Down
22 changes: 17 additions & 5 deletions metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
Expand Up @@ -41,7 +41,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
Expand All @@ -64,6 +63,7 @@ public class Warehouse {

private MetaStoreFS fsHandler = null;
private boolean storageAuthCheck = false;
private boolean inheritPerms = false;

public Warehouse(Configuration conf) throws MetaException {
this.conf = conf;
Expand All @@ -75,6 +75,8 @@ public Warehouse(Configuration conf) throws MetaException {
fsHandler = getMetaStoreFsHandler(conf);
storageAuthCheck = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS);
inheritPerms = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
}

private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
Expand Down Expand Up @@ -172,10 +174,20 @@ public boolean mkdirs(Path f) throws MetaException {
try {
fs = getFs(f);
LOG.debug("Creating directory if it doesn't exist: " + f);
short umaskVal = (short) conf.getInt(HiveConf.ConfVars.HIVE_FILES_UMASK_VALUE.name(), 0002);
FsPermission fsPermission = new FsPermission(umaskVal);
FsPermission.setUMask(conf, fsPermission);
return (fs.mkdirs(f) || fs.getFileStatus(f).isDir());
//Check if the directory already exists. We want to change the permission
//to that of the parent directory only for newly created directories.
if (this.inheritPerms) {
try {
return fs.getFileStatus(f).isDir();
} catch (FileNotFoundException ignore) {
}
}
boolean success = fs.mkdirs(f);
if (this.inheritPerms && success) {
// Set the permission of parent directory.
fs.setPermission(f, fs.getFileStatus(f.getParent()).getPermission());
}
return success;
} catch (IOException e) {
closeFs(fs);
MetaStoreUtils.logAndThrowMetaException(e);
Expand Down
Expand Up @@ -18,14 +18,17 @@

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.util.StringUtils;

public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {

@Override
protected void setUp() throws Exception {
super.setUp();

hiveConf.setBoolean(
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, true);
warehouse = new Warehouse(hiveConf);
try {
client = new HiveMetaStoreClient(hiveConf, null);
} catch (Throwable e) {
Expand Down
Expand Up @@ -132,6 +132,17 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
db = client.getDatabase(dbName);
Path dbPath = new Path(db.getLocationUri());
FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
boolean inheritPerms = hiveConf.getBoolVar(
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
if (inheritPerms) {
//Set different perms for the database dir for further tests
dbPermission = new FsPermission((short)488);
fs.setPermission(dbPath, dbPermission);
}

client.dropType(typeName);
Type typ1 = new Type();
Expand Down Expand Up @@ -178,6 +189,9 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
tbl = client.getTable(dbName, tblName);
}

assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
.getPermission());

Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
Expand All @@ -195,14 +209,20 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
Partition retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
.getPermission());
Partition retp2 = client.add_partition(part2);
assertNotNull("Unable to create partition " + part2, retp2);
assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
.getPermission());
Partition retp3 = client.add_partition(part3);
assertNotNull("Unable to create partition " + part3, retp3);
assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
.getPermission());
Partition retp4 = client.add_partition(part4);
assertNotNull("Unable to create partition " + part4, retp4);


assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
.getPermission());

Partition part_get = client.getPartition(dbName, tblName, part.getValues());
if(isThriftClient) {
Expand Down Expand Up @@ -284,7 +304,6 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

Path partPath = new Path(part.getSd().getLocation());
FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);


assertTrue(fs.exists(partPath));
Expand All @@ -307,6 +326,8 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
// tested
retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
.getPermission());

// test add_partitions

Expand Down Expand Up @@ -344,6 +365,7 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
Path mp5Path = new Path(mpart5.getSd().getLocation());
warehouse.mkdirs(mp5Path);
assertTrue(fs.exists(mp5Path));
assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());

// add_partitions(5,4) : err = duplicate keyvals on mpart4
savedException = null;
Expand Down

0 comments on commit cafdcf0

Please sign in to comment.