@@ -23,6 +23,8 @@
 import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.Locale;
 import java.util.concurrent.TimeUnit;
 
@@ -33,7 +35,6 @@
 import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
 import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.util.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,7 +111,7 @@ public AssumedRoleCredentialProvider(@Nullable URI fsUri, Configuration conf)
         Arrays.asList(
             SimpleAWSCredentialsProvider.class,
             EnvironmentVariableCredentialsProvider.class),
-        Sets.newHashSet(this.getClass()));
+        new HashSet<>(Collections.singletonList(this.getClass())));
     LOG.debug("Credentials to obtain role credentials: {}", credentialsToSTS);
 
     // then the STS binding
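The substitution throughout this patch is mechanical: each use of org.apache.hadoop.util.Sets.newHashSet(...) becomes a java.util.HashSet built from a plain JDK collection. A minimal sketch of the correspondence (illustrative only, not part of the diff):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SetsReplacementSketch {
  public static void main(String[] args) {
    // Sets.newHashSet()      ->  new HashSet<>()
    Set<String> empty = new HashSet<>();

    // Sets.newHashSet(x)     ->  new HashSet<>(Collections.singletonList(x))
    Set<String> single = new HashSet<>(Collections.singletonList("x"));

    // Sets.newHashSet(a, b)  ->  new HashSet<>(Arrays.asList(a, b))
    Set<String> pair = new HashSet<>(Arrays.asList("a", "b"));

    // All three are ordinary mutable HashSets, so existing callers
    // that add or remove elements later behave exactly as before.
    empty.add("y");
    single.remove("x");
    System.out.println(empty + " " + single + " " + pair);
  }
}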
@@ -24,13 +24,13 @@
 import java.nio.file.AccessDeniedException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.auth.InstanceProfileCredentialsProvider;
-import org.apache.hadoop.util.Sets;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -180,7 +180,7 @@ public void testFallbackToDefaults() throws Throwable {
         ASSUMED_ROLE_CREDENTIALS_PROVIDER,
         Arrays.asList(
             EnvironmentVariableCredentialsProvider.class),
-        Sets.newHashSet());
+        new HashSet<>());
     assertTrue("empty credentials", credentials.size() > 0);
 
   }
@@ -28,13 +28,13 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.util.Sets;
 import org.assertj.core.api.Assertions;
 import org.junit.FixMethodOrder;
 import org.junit.Rule;
@@ -220,7 +220,7 @@ public void test_200_execute() throws Exception {
 
     // create all the input files on the local FS.
     List<String> expectedFiles = new ArrayList<>(numFiles);
-    Set<String> expectedKeys = Sets.newHashSet();
+    Set<String> expectedKeys = new HashSet<>();
     for (int i = 0; i < numFiles; i += 1) {
       File file = localFilesDir.newFile(i + ".text");
       try (FileOutputStream out = new FileOutputStream(file)) {
@@ -35,7 +35,6 @@
 import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
 import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
 
-import org.apache.hadoop.util.Sets;
 import org.assertj.core.api.Assertions;
 import org.junit.After;
 import org.junit.Before;
@@ -447,7 +446,7 @@ public void testSingleTaskMultiFileCommit() throws Exception {
     assertEquals("Should have correct number of pending commits",
         files.size(), pending.size());
 
-    Set<String> keys = Sets.newHashSet();
+    Set<String> keys = new HashSet<>();
     for (SinglePendingCommit commit : pending) {
       assertEquals("Should write to the correct bucket: " + commit,
           BUCKET, commit.getBucket());
@@ -709,7 +708,7 @@ private Set<String> runTasks(JobContext jobContext,
       int numTasks, int numFiles)
       throws IOException {
     results.resetUploads();
-    Set<String> uploads = Sets.newHashSet();
+    Set<String> uploads = new HashSet<>();
 
     for (int taskId = 0; taskId < numTasks; taskId += 1) {
       TaskAttemptID attemptID = new TaskAttemptID(
@@ -746,7 +745,7 @@ private Set<String> commitTask(StagingCommitter staging,
       throws IOException {
     Path attemptPath = staging.getTaskAttemptPath(attempt);
 
-    Set<String> files = Sets.newHashSet();
+    Set<String> files = new HashSet<>();
     for (int i = 0; i < numFiles; i += 1) {
       Path outPath = writeOutputFile(
           attempt.getTaskAttemptID(), attemptPath, UUID.randomUUID().toString(),
@@ -20,13 +20,13 @@
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 
 import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
 import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.Sets;
 import org.assertj.core.api.Assertions;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -145,7 +145,7 @@ public void testAppend() throws Exception {
    */
   protected void verifyFilesCreated(
       final PartitionedStagingCommitter committer) {
-    Set<String> files = Sets.newHashSet();
+    Set<String> files = new HashSet<>();
     for (InitiateMultipartUploadRequest request :
         getMockResults().getRequests().values()) {
       assertEquals(BUCKET, request.getBucketName());
@@ -185,7 +185,7 @@ public void testReplace() throws Exception {
   }
 
   public Set<String> buildExpectedList(StagingCommitter committer) {
-    Set<String> expected = Sets.newHashSet();
+    Set<String> expected = new HashSet<>();
     boolean unique = committer.useUniqueFilenames();
     for (String relative : relativeFiles) {
       expected.add(OUTPUT_PREFIX +
@@ -33,9 +33,9 @@
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.net.URI;
+import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.hadoop.util.Sets;
 
 /**
  * The CopyListing abstraction is responsible for how the list of
@@ -163,8 +163,8 @@ private void validateFinalListing(Path pathToListFile, DistCpContext context)
     CopyListingFileStatus lastFileStatus = new CopyListingFileStatus();
 
     Text currentKey = new Text();
-    Set<URI> aclSupportCheckFsSet = Sets.newHashSet();
-    Set<URI> xAttrSupportCheckFsSet = Sets.newHashSet();
+    Set<URI> aclSupportCheckFsSet = new HashSet<>();
+    Set<URI> xAttrSupportCheckFsSet = new HashSet<>();
     long idx = 0;
     while (reader.next(currentKey)) {
       if (currentKey.equals(lastKey)) {
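Context for the two sets above: validateFinalListing uses them to remember which source filesystems have already been probed for ACL and XAttr support, so each filesystem is checked only once no matter how many paths it contributes. A sketch of that once-per-filesystem idiom under assumed names (the probeAclSupport helper is hypothetical):

import java.net.URI;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OncePerFsSketch {
  public static void main(String[] args) {
    List<URI> sources = Arrays.asList(
        URI.create("hdfs://nn1/a"), URI.create("hdfs://nn1/b"),
        URI.create("s3a://bucket/c"));
    Set<URI> aclChecked = new HashSet<>();
    for (URI path : sources) {
      URI fs = URI.create(path.getScheme() + "://" + path.getAuthority());
      if (aclChecked.add(fs)) {   // add() returns false on repeats
        probeAclSupport(fs);      // hypothetical: runs once per filesystem
      }
    }
  }

  private static void probeAclSupport(URI fs) {
    System.out.println("checking ACL support on " + fs);
  }
}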
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.tools.dynamometer;
 
-import org.apache.hadoop.util.Sets;
 import java.util.Optional;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
@@ -34,6 +33,9 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -271,12 +273,12 @@ public static void setupClass() throws Exception {
     RMNodeLabelsManager nodeLabelManager = miniYARNCluster.getResourceManager()
         .getRMContext().getNodeLabelManager();
     nodeLabelManager.addToCluserNodeLabelsWithDefaultExclusivity(
-        Sets.newHashSet(NAMENODE_NODELABEL, DATANODE_NODELABEL));
+        new HashSet<>(Arrays.asList(NAMENODE_NODELABEL, DATANODE_NODELABEL)));
     Map<NodeId, Set<String>> nodeLabels = new HashMap<>();
     nodeLabels.put(miniYARNCluster.getNodeManager(0).getNMContext().getNodeId(),
-        Sets.newHashSet(NAMENODE_NODELABEL));
+        new HashSet<>(Collections.singletonList(NAMENODE_NODELABEL)));
     nodeLabels.put(miniYARNCluster.getNodeManager(1).getNMContext().getNodeId(),
-        Sets.newHashSet(DATANODE_NODELABEL));
+        new HashSet<>(Collections.singletonList(DATANODE_NODELABEL)));
     nodeLabelManager.addLabelsToNode(nodeLabels);
   }
 
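One note on the replacement chosen in this last hunk: new HashSet<>(Arrays.asList(...)) rather than Java 9's Set.of(...), presumably because the branch still targets Java 8 and because Set.of returns an immutable set. A small sketch of the behavioural difference (illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class NodeLabelSetSketch {
  public static void main(String[] args) {
    // Mutable and duplicate-tolerant: matches Sets.newHashSet(a, b) exactly.
    Set<String> labels = new HashSet<>(Arrays.asList("namenode", "datanode"));
    labels.add("client");  // succeeds: HashSet is mutable

    // Set.of("namenode", "datanode") (Java 9+) would be immutable and
    // rejects duplicate elements, so it is not a drop-in replacement
    // anywhere a caller later mutates the set.
    System.out.println(labels);
  }
}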