Skip to content
Permalink
Browse files

tmp

  • Loading branch information...
LuQQiu committed May 15, 2019
1 parent 2440ee3 commit 2c7835aaa2a7f07274e999994de27afc08fbac7e
@@ -71,42 +71,43 @@
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-gcs</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-hdfs</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-kodo</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-oss</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-s3a</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-swift</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.alluxio</groupId>
<artifactId>alluxio-underfs-wasb</artifactId>
<version>2.0.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
</dependencies>
</project>
@@ -71,7 +71,7 @@ public static boolean runExample(final Callable<Boolean> example) {
* @param ufs the under filesystem
* @param directory the directory to clean
*/
public static void cleanup(UnderFileSystem ufs, String directory) throws IOException {
public static void cleanupUfs(UnderFileSystem ufs, String directory) throws IOException {
UfsStatus[] statuses = ufs.listStatus(directory);
for (UfsStatus status : statuses) {
if (status instanceof UfsFileStatus) {
@@ -18,15 +18,9 @@
import alluxio.underfs.UnderFileSystem;
import alluxio.underfs.UnderFileSystemConfiguration;
import alluxio.underfs.UnderFileSystemFactory;
import alluxio.underfs.gcs.GCSUnderFileSystemFactory;
import alluxio.underfs.hdfs.HdfsUnderFileSystemFactory;
import alluxio.underfs.kodo.KodoUnderFileSystemFactory;
import alluxio.underfs.UnderFileSystemFactoryRegistry;
import alluxio.underfs.options.DeleteOptions;
import alluxio.underfs.oss.OSSUnderFileSystemFactory;
import alluxio.underfs.s3a.S3AUnderFileSystem;
import alluxio.underfs.s3a.S3AUnderFileSystemFactory;
import alluxio.underfs.swift.SwiftUnderFileSystemFactory;
import alluxio.underfs.wasb.WasbUnderFileSystemFactory;
import alluxio.util.ConfigurationUtils;
import alluxio.util.io.PathUtils;

@@ -37,6 +31,9 @@
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

@@ -47,11 +44,6 @@
public final class UnderFileSystemContractTest {
private static final Logger LOG = LoggerFactory.getLogger(UnderFileSystemContractTest.class);

@Parameter(names = {"--ufs"}, required = true,
description = "The target ufs type. Valid value includes "
+ "HDFS, S3A, GCS, KODO, OSS, SWIFT, and WASB")
private String mUfsType;

@Parameter(names = {"--path"}, required = true,
description = "The under filesystem path to run tests against.")
private String mUfsPath;
@@ -61,116 +53,103 @@

// The factory to check if the given ufs path is valid and create ufs
private UnderFileSystemFactory mFactory;
private UnderFileSystem mUfs;
private String mUfsType;
private InstancedConfiguration mConf
= new InstancedConfiguration(ConfigurationUtils.defaults());
private String mFailedTest;

private UnderFileSystemContractTest() {}

private void run() throws Exception {
mFactory = UnderFileSystemFactoryRegistry.find(mUfsPath,
UnderFileSystemConfiguration.defaults(mConf));
// Check if the ufs path is valid
createUnderFileSystemFactory();
if (mFactory == null || !mFactory.supportsPath(mUfsPath)) {
LOG.error("%s is not a valid %s path", mUfsPath, mUfsType);
LOG.error("{} is not a valid path", mUfsPath);
System.exit(1);
}

mConf.set(PropertyKey.UNDERFS_LISTING_LENGTH, "50");
mConf.set(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT, "512B");
// Increase the buffer time of journal writes to speed up tests
mConf.set(PropertyKey.MASTER_JOURNAL_FLUSH_BATCH_TIME_MS, "1sec");

mUfs = createUnderFileSystem();
mUfsType = mUfs.getUnderFSType();

runCommonOperations();
if (mUfsType.equals("S3A")) {

if (mUfsType.equals("s3")) {
runS3AOperations();
}
CliUtils.printPassInfo(true);
}

private void runCommonOperations() throws Exception {
InstancedConfiguration conf = new InstancedConfiguration(ConfigurationUtils.defaults());
conf.set(PropertyKey.UNDERFS_LISTING_LENGTH, "50");
conf.set(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT, "512B");
// Increase the buffer time of journal writes to speed up tests
conf.set(PropertyKey.MASTER_JOURNAL_FLUSH_BATCH_TIME_MS, "1sec");

UnderFileSystem ufs = createUnderFileSystem(conf);
// Create the test directory to run tests against
String testDir = PathUtils.concatPath(mUfsPath, UUID.randomUUID());
UnderFileSystemCommonOperations ops
= new UnderFileSystemCommonOperations(mUfsPath, testDir, ufs, conf);
try {
ops.runTests();
} catch (IOException e) {
if (mUfsType.equals("S3A")) {
List<String> operations = ops.getRelatedS3AOperations();
if (operations.size() > 0) {
LOG.info("Related S3 operations: "
+ StringUtils.join(ops.getRelatedS3AOperations(), ","));
}
}
throw e;
} finally {
cleanup(ufs, testDir);
}
= new UnderFileSystemCommonOperations(mUfsPath, testDir, mUfs, mConf);
loadAndRunTests(ops, testDir);
}

private void runS3AOperations() throws IOException {
InstancedConfiguration conf = new InstancedConfiguration(ConfigurationUtils.defaults());
conf.set(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT, "1MB");
conf.set(PropertyKey.MASTER_JOURNAL_FLUSH_BATCH_TIME_MS, "1sec");
conf.set(PropertyKey.UNDERFS_S3A_LIST_OBJECTS_VERSION_1, "true");
conf.set(PropertyKey.UNDERFS_S3A_STREAMING_UPLOAD_ENABLED, "true");
conf.set(PropertyKey.UNDERFS_S3A_STREAMING_UPLOAD_PARTITION_SIZE, "5MB");
conf.set(PropertyKey.UNDERFS_S3A_INTERMEDIATE_UPLOAD_CLEAN_AGE, "0");
private void runS3AOperations() throws Exception {
mConf.set(PropertyKey.UNDERFS_S3A_LIST_OBJECTS_VERSION_1, "true");
mConf.set(PropertyKey.UNDERFS_S3A_STREAMING_UPLOAD_ENABLED, "true");
mConf.set(PropertyKey.UNDERFS_S3A_STREAMING_UPLOAD_PARTITION_SIZE, "5MB");
mConf.set(PropertyKey.UNDERFS_S3A_INTERMEDIATE_UPLOAD_CLEAN_AGE, "0");

UnderFileSystem ufs = createUnderFileSystem(conf);
mUfs = createUnderFileSystem();
String testDir = PathUtils.concatPath(mUfsPath, UUID.randomUUID());
S3ASpecificOperations ops
= new S3ASpecificOperations(mUfsPath, testDir, (S3AUnderFileSystem) ufs, conf);
= new S3ASpecificOperations(mUfsPath, testDir, (S3AUnderFileSystem) mUfs, mConf);
loadAndRunTests(ops, testDir);
}

/**
 * Discovers and runs, via reflection, every public method of {@code object}
 * whose name ends with "Test", tracking the currently running test so a
 * failure can be mapped to its related S3A operations. Always cleans up the
 * test directory and closes the ufs when done.
 *
 * <p>Fixes: removed the stale {@code ops.runTests()} and {@code cleanup(ufs,
 * testDir)} diff-residue lines; the ufs-type check now compares against "s3"
 * — {@code getUnderFSType()} reports "s3" for S3A (see the check in run()),
 * so the previous literal "S3A" could never match.
 *
 * @param object the operations object whose *Test methods are invoked
 * @param testDir the test directory to clean up when done
 * @throws Exception if any test fails; reflective failures are unwrapped and
 *         rethrown as {@link IOException}
 */
public void loadAndRunTests(Object object, String testDir) throws Exception {
  try {
    Class<?> classToRun = object.getClass();
    for (Method method : classToRun.getMethods()) {
      String methodName = method.getName();
      if (methodName.endsWith("Test")) {
        // Record the test name before invoking so a failure can be reported
        mFailedTest = methodName;
        LOG.info("Running test: " + methodName);
        method.invoke(object);
        LOG.info("Test Passed!");
      }
    }
    mFailedTest = "";
  } catch (InvocationTargetException e) {
    if (mUfsType.equals("s3")) {
      List<String> operations = getRelatedS3AOperations();
      if (operations.size() > 0) {
        LOG.info("Related S3 operations: "
            + StringUtils.join(operations, ","));
      }
    }
    // Unwrap the reflective wrapper so callers see the test's own failure
    throw new IOException(e.getTargetException());
  } finally {
    cleanupUfs(mUfs, testDir);
  }
}

private UnderFileSystem createUnderFileSystem(InstancedConfiguration conf) {
private UnderFileSystem createUnderFileSystem() {
UnderFileSystem ufs = mFactory.create(mUfsPath,
UnderFileSystemConfiguration.defaults(conf), conf);
UnderFileSystemConfiguration.defaults(mConf));
if (ufs == null) {
LOG.error("Failed to create under filesystem");
System.exit(1);
}
return ufs;
}

private void cleanup(UnderFileSystem ufs, String testDir) throws IOException {
private void cleanupUfs(UnderFileSystem ufs, String testDir) throws IOException {
ufs.deleteDirectory(testDir, DeleteOptions.defaults().setRecursive(true));
ufs.close();
}

/**
 * Sets {@link #mFactory} according to {@link #mUfsType}. Exits the process
 * with status 1 when the type is missing or unrecognized.
 *
 * <p>NOTE(review): in run(), mUfsType is only assigned from getUnderFSType()
 * AFTER the ufs is created, yet this method is called first — so mUfsType may
 * still be null here. The explicit null guard below avoids an NPE from the
 * switch selector; confirm how mUfsType is meant to be populated before this
 * call.
 */
private void createUnderFileSystemFactory() throws IOException {
  if (mUfsType == null) {
    // A null switch selector would throw NPE; fail with a clear message
    LOG.error("No ufs type given");
    System.exit(1);
  }
  switch (mUfsType) {
    case "HDFS":
      mFactory = new HdfsUnderFileSystemFactory();
      break;
    case "S3A":
      mFactory = new S3AUnderFileSystemFactory();
      break;
    case "GCS":
      mFactory = new GCSUnderFileSystemFactory();
      break;
    case "KODO":
      mFactory = new KodoUnderFileSystemFactory();
      break;
    case "OSS":
      mFactory = new OSSUnderFileSystemFactory();
      break;
    case "SWIFT":
      mFactory = new SwiftUnderFileSystemFactory();
      break;
    case "WASB":
      mFactory = new WasbUnderFileSystemFactory();
      break;
    default:
      // Include the offending value in the diagnostic
      LOG.error("The given ufs type {} is invalid", mUfsType);
      System.exit(1);
  }
}

/**
* @param args the input arguments
*/
@@ -201,9 +180,102 @@ private static String getHelpMessage() {
+ "a S3A compatibility test to test if the target under filesystem can "
+ "fulfill the minimum S3A compatibility requirements in order to "
+ "work well with Alluxio through Alluxio's integration with S3A. \n"
+ "Command line example: 'bin/alluxio runUnderFileSystemTest --name S3A "
+ "--path=s3a://testPath -Daws.accessKeyId=<accessKeyId> -Daws.secretKeyId=<secretKeyId>"
+ "Command line example: 'bin/alluxio runUnderFileSystemTest --path=s3a://testPath "
+ "-Daws.accessKeyId=<accessKeyId> -Daws.secretKeyId=<secretKeyId>"
+ "-Dalluxio.underfs.s3.endpoint=<endpoint_url> "
+ "-Dalluxio.underfs.s3.disable.dns.buckets=true'";
}

/**
 * Maps the most recently attempted (failed) test to the S3A client calls it
 * exercises, so a failure against an S3A-compatible store can be traced to
 * the specific operations that store mishandled. This method should only be
 * called when the ufs is S3A.
 *
 * @return the S3A operations related to the failed test; empty when the
 *         failed test has no mapping (or no test failed)
 */
public List<String> getRelatedS3AOperations() {
  List<String> ops = new ArrayList<>();
  switch (mFailedTest) {
    // Create/status tests: an upload followed by a metadata read
    case "createAtomic":
    case "createEmpty":
    case "createParent":
    case "createThenGetExistingFileStatus":
    case "createThenGetExistingStatus":
    case "getFileSize":
    case "getFileStatus":
    case "getModTime":
      ops.add("TransferManager.upload()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    // Create-then-open tests: an upload followed by an object read
    case "createOpen":
    case "createOpenAtPosition":
    case "createOpenEmpty":
    case "createOpenExistingLargeFile":
    case "createOpenLarge":
      ops.add("TransferManager.upload()");
      ops.add("AmazonS3Client.getObject()");
      break;
    case "deleteFile":
      ops.add("TransferManager.upload()");
      ops.add("AmazonS3Client.deleteObject()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    // Directory deletion tests: bulk delete plus listing
    case "deleteDir":
    case "deleteLargeDirectory":
    case "createThenDeleteExistingDirectory":
      ops.add("AmazonS3Client.putObject()");
      ops.add("AmazonS3Client.deleteObjects()");
      ops.add("AmazonS3Client.listObjectsV2()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    case "createDeleteFileConjuction":
      ops.add("TransferManager.upload()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      ops.add("AmazonS3Client.deleteObject()");
      break;
    case "exists":
    case "getDirectoryStatus":
    case "createThenGetExistingDirectoryStatus":
      ops.add("AmazonS3Client.putObject()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    case "isFile":
      ops.add("AmazonS3Client.putObject()");
      ops.add("AmazonS3Client.deleteObject()");
      break;
    // Listing and mkdir tests: object creation plus listing plus metadata
    case "listStatus":
    case "listStatusEmpty":
    case "listStatusFile":
    case "listLargeDirectory":
    case "listStatusRecursive":
    case "mkdirs":
    case "objectCommonPrefixesIsDirectory":
    case "objectCommonPrefixesListStatusNonRecursive":
    case "objectCommonPrefixesListStatusRecursive":
    case "objectNestedDirsListStatusRecursive":
      ops.add("AmazonS3Client.putObject()");
      ops.add("AmazonS3Client.listObjectsV2()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    // File rename tests: upload, server-side copy, then delete of the source
    case "renameFile":
    case "renameRenamableFile":
      ops.add("TransferManager.upload()");
      ops.add("TransferManager.copyObject()");
      ops.add("AmazonS3Client.deleteObject()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    case "renameDirectory":
    case "renameDirectoryDeep":
    case "renameLargeDirectory":
    case "renameRenameableDirectory":
      ops.add("AmazonS3Client.putObject()");
      ops.add("TransferManager.upload()");
      ops.add("TransferManager.copyObject()");
      ops.add("AmazonS3Client.listObjectsV2()");
      ops.add("AmazonS3Client.getObjectMetadata()");
      break;
    default:
      break;
  }
  return ops;
}
}

0 comments on commit 2c7835a

Please sign in to comment.
You can’t perform that action at this time.