Commit 03f7ed3

HDFS-7655. Expose truncate API for Web HDFS. (yliu)

y-liu committed Feb 5, 2015
1 parent 4641196 commit 03f7ed3

Showing 7 changed files with 129 additions and 18 deletions.
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -22,7 +22,6 @@
 import java.net.URI;
 import java.util.Random;
 
-
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
@@ -127,28 +126,36 @@ public Path getDefaultWorkingDirectory(FileSystem fSys)
    */
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, short numRepl, boolean createParent) throws IOException {
-    FSDataOutputStream out =
-        fSys.create(path, false, 4096, numRepl, blockSize );
+    return createFile(fSys, path, getFileData(numBlocks, blockSize),
+        blockSize, numRepl);
+  }
 
-    byte[] data = getFileData(numBlocks, blockSize);
-    out.write(data, 0, data.length);
-    out.close();
+  public static long createFile(FileSystem fSys, Path path, byte[] data,
+      int blockSize, short numRepl) throws IOException {
+    FSDataOutputStream out =
+        fSys.create(path, false, 4096, numRepl, blockSize);
+    try {
+      out.write(data, 0, data.length);
+    } finally {
+      out.close();
+    }
     return data.length;
   }
 
-
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize, boolean createParent) throws IOException {
-    return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(path), true);
+    return createFile(fSys, path, numBlocks, blockSize,
+        fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize) throws IOException {
-      return createFile(fSys, path, numBlocks, blockSize, true);
+    return createFile(fSys, path, numBlocks, blockSize, true);
   }
 
   public static long createFile(FileSystem fSys, Path path) throws IOException {
-    return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, DEFAULT_NUM_REPL, true);
+    return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE,
+        DEFAULT_NUM_REPL, true);
   }
 
   public long createFile(FileSystem fSys, String name) throws IOException {
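The refactoring above separates data generation from file creation: the new
byte[] overload lets a test keep the exact bytes it wrote and compare them
against the file after a later mutation such as truncate (the new
testTruncate below does exactly this). A minimal usage sketch, assuming the
helper's static methods are importable as shown; fs and path are
placeholders, not part of the patch:

    import static org.apache.hadoop.fs.FileSystemTestHelper.*;

    // Create a 2-block file and keep its contents for later verification.
    byte[] data = getFileData(2, 1024);
    createFile(fs, path, data, 1024, (short) 3);
    // 'data' can now be checked against whatever remains after a truncate.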
2 changes: 2 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Expand Up @@ -306,6 +306,8 @@ Release 2.7.0 - UNRELEASED

HDFS-6673. Add delimited format support to PB OIV tool. (Eddy Xu via wang)

HDFS-7655. Expose truncate API for Web HDFS. (yliu)

IMPROVEMENTS

HDFS-7055. Add tracing to DFSInputStream (cmccabe)
Expand Down
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -57,7 +57,7 @@
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -614,10 +614,12 @@ public Response postRoot(
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
+          final NewLengthParam newLength
       ) throws IOException, InterruptedException {
     return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
-        bufferSize, excludeDatanodes);
+        bufferSize, excludeDatanodes, newLength);
   }
 
   /** Handle HTTP POST request. */
@@ -641,19 +643,21 @@ public Response post(
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
+          final NewLengthParam newLength
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
-        excludeDatanodes);
+        excludeDatanodes, newLength);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
         try {
           return post(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, concatSrcs, bufferSize,
-              excludeDatanodes);
+              excludeDatanodes, newLength);
         } finally {
           reset();
         }
@@ -670,9 +674,11 @@ private Response post(
       final PostOpParam op,
       final ConcatSourcesParam concatSrcs,
       final BufferSizeParam bufferSize,
-      final ExcludeDatanodesParam excludeDatanodes
+      final ExcludeDatanodesParam excludeDatanodes,
+      final NewLengthParam newLength
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
+    final NamenodeProtocols np = getRPCServer(namenode);
 
     switch(op.getValue()) {
     case APPEND:
@@ -684,9 +690,17 @@
     }
     case CONCAT:
     {
-      getRPCServer(namenode).concat(fullpath, concatSrcs.getAbsolutePaths());
+      np.concat(fullpath, concatSrcs.getAbsolutePaths());
       return Response.ok().build();
     }
+    case TRUNCATE:
+    {
+      // We treat each rest request as a separate client.
+      final boolean b = np.truncate(fullpath, newLength.getValue(),
+          "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
+      final String js = JsonUtil.toJsonString("boolean", b);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
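Because TRUNCATE is registered below in PostOpParam with
doOutputAndRedirect=false and an expected HTTP_OK, the NameNode answers this
POST itself rather than redirecting the client to a datanode. A hedged
sketch of the resulting wire-level exchange; the host, port, and file path
are placeholders, and only the op and newlength query parameters come from
this patch (authentication parameters are omitted):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsTruncateHttp {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; /webhdfs/v1 is the standard WebHDFS prefix.
        URL url = new URL("http://namenode:50070/webhdfs/v1/tmp/testFile"
            + "?op=TRUNCATE&newlength=1024");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          // On success the handler above returns {"boolean":true} (or false
          // while block recovery is still in progress) with HTTP 200,
          // mirroring JsonUtil.toJsonString("boolean", b).
          System.out.println(in.readLine());
        } finally {
          conn.disconnect();
        }
      }
    }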
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1160,6 +1160,14 @@ public FSDataOutputStream append(final Path f, final int bufferSize,
     ).run();
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    statistics.incrementWriteOps(1);
+
+    final HttpOpParam.Op op = PostOpParam.Op.TRUNCATE;
+    return new FsPathBooleanRunner(op, f, new NewLengthParam(newLength)).run();
+  }
+
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
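With this override in place, truncate over WebHDFS is reachable through the
generic FileSystem API. A brief usage sketch, assuming a running cluster;
the URI and path are placeholders. Per the FileSystem.truncate contract, a
false return means the last block needs recovery and the file should not be
reopened for writing until the truncate completes:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsTruncateClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A webhdfs:// URI selects WebHdfsFileSystem, which now routes
        // truncate through PostOpParam.Op.TRUNCATE as shown above.
        FileSystem fs =
            FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
        boolean done = fs.truncate(new Path("/tmp/testFile"), 1024L);
        if (!done) {
          System.out.println("Truncate scheduled; waiting on block recovery.");
        }
      }
    }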
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java (new file)

@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;

/** NewLength parameter. */
public class NewLengthParam extends LongParam {
/** Parameter name. */
public static final String NAME = "newlength";
/** Default parameter value. */
public static final String DEFAULT = NULL;

private static final Domain DOMAIN = new Domain(NAME);

/**
* Constructor.
* @param value the parameter value.
*/
public NewLengthParam(final Long value) {
super(DOMAIN, value, 0L, null);
}

/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public NewLengthParam(final String str) {
this(DOMAIN.parse(str));
}

@Override
public String getName() {
return NAME;
}
}
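NewLengthParam follows the existing LongParam pattern: the JAX-RS layer
builds it from the raw query-string value via the shared Domain, and the 0L
lower bound passed to the superclass constructor is there to reject negative
lengths. A small illustrative sketch of the two constructors (values are
arbitrary):

    // From a query string, as the @QueryParam machinery would construct it:
    NewLengthParam fromQuery = new NewLengthParam("1024");
    assert fromQuery.getValue() == 1024L;

    // Programmatically, as WebHdfsFileSystem.truncate does above:
    NewLengthParam fromLong = new NewLengthParam(1024L);
    assert "newlength".equals(fromLong.getName());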
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java

@@ -27,6 +27,8 @@ public static enum Op implements HttpOpParam.Op {
 
     CONCAT(false, HttpURLConnection.HTTP_OK),
 
+    TRUNCATE(false, HttpURLConnection.HTTP_OK),
+
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutputAndRedirect;
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java

@@ -29,11 +29,13 @@
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSMainOperationsBaseTest;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -136,6 +138,33 @@ public void testConcat() throws Exception {
     Assert.assertEquals(1024*4, fileStatus.getLen());
   }
 
+  @Test
+  public void testTruncate() throws Exception {
+    final short repl = 3;
+    final int blockSize = 1024;
+    final int numOfBlocks = 2;
+    Path dir = getTestRootPath(fSys, "test/hadoop");
+    Path file = getTestRootPath(fSys, "test/hadoop/file");
+
+    final byte[] data = getFileData(numOfBlocks, blockSize);
+    createFile(fSys, file, data, blockSize, repl);
+
+    final int newLength = blockSize;
+
+    boolean isReady = fSys.truncate(file, newLength);
+
+    Assert.assertTrue("Recovery is not expected.", isReady);
+
+    FileStatus fileStatus = fSys.getFileStatus(file);
+    Assert.assertEquals(fileStatus.getLen(), newLength);
+    AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());
+
+    ContentSummary cs = fSys.getContentSummary(dir);
+    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
+        newLength * repl);
+    Assert.assertTrue("Deleted", fSys.delete(dir, true));
+  }
+
   // Test that WebHdfsFileSystem.jsonParse() closes the connection's input
   // stream.
   // Closing the inputstream in jsonParse will allow WebHDFS to reuse
