Skip to content

Commit

Permalink
HDDS-8285. Eliminate leftover Guava Optional from CacheValue (#4482)
Browse files Browse the repository at this point in the history
  • Loading branch information
adoroszlai committed Mar 27, 2023
1 parent 31cc0bd commit e8fbdaa
Show file tree
Hide file tree
Showing 60 changed files with 169 additions and 236 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import java.util.Map;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.TableCacheMetrics;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
Expand Down Expand Up @@ -110,7 +109,7 @@ public TypedTable(
// NEVER. Setting epoch value -1, so that when it is marked for
// delete, this will be considered for cleanup.
cache.loadInitial(new CacheKey<>(kv.getKey()),
new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT));
CacheValue.get(EPOCH_DEFAULT, kv.getValue()));
}
}
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
import java.util.List;
import java.util.Set;

import com.google.common.base.Optional;

import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
Expand Down Expand Up @@ -249,8 +248,7 @@ public void testTypedTableWithCache() throws Exception {
String key = Integer.toString(x);
String value = Integer.toString(x);
testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.of(value),
x));
CacheValue.get(x, value));
}

// As we have added to cache, so get should return value even if it
Expand All @@ -275,11 +273,10 @@ public void testTypedTableWithCacheWithFewDeletedOperationType()
String value = Integer.toString(x);
if (x % 2 == 0) {
testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.of(value), x));
CacheValue.get(x, value));
} else {
testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.absent(),
x));
CacheValue.get(x));
}
}

Expand Down Expand Up @@ -363,11 +360,11 @@ public void testIsExistCache() throws Exception {
RandomStringUtils.random(10);
String value = RandomStringUtils.random(10);
testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.of(value), 1L));
CacheValue.get(1L, value));
Assertions.assertTrue(testTable.isExist(key));

testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.absent(), 1L));
CacheValue.get(1L));
Assertions.assertFalse(testTable.isExist(key));
}
}
Expand Down Expand Up @@ -403,7 +400,7 @@ public void testByteArrayTypedTable() throws Exception {
Assertions.assertArrayEquals(value, testTable.get(key));
Assertions.assertNotSame(value, actualValue);
testTable.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.of(value), 1L));
CacheValue.get(1L, value));
Assertions.assertSame(value, testTable.get(key));
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
import java.util.List;
import java.util.concurrent.CompletableFuture;

import com.google.common.base.Optional;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
Expand Down Expand Up @@ -60,7 +59,7 @@ public void testPartialTableCache(TableCache.CacheType cacheType) {

for (int i = 0; i < 10; i++) {
tableCache.put(new CacheKey<>(Integer.toString(i)),
new CacheValue<>(Optional.of(Integer.toString(i)), i));
CacheValue.get(i, Integer.toString(i)));
}


Expand Down Expand Up @@ -105,9 +104,9 @@ public void testTableCacheWithRenameKey(TableCache.CacheType cacheType) {
// putting cache with same epoch and different keyNames
for (int i = 0; i < 3; i++) {
tableCache.put(new CacheKey<>(Integer.toString(i).concat("A")),
new CacheValue<>(Optional.absent(), i));
CacheValue.get(i));
tableCache.put(new CacheKey<>(Integer.toString(i).concat("B")),
new CacheValue<>(Optional.of(Integer.toString(i)), i));
CacheValue.get(i, Integer.toString(i)));
}

// Epoch entries should be like (long, (key1, key2, ...))
Expand Down Expand Up @@ -165,7 +164,7 @@ public void testPartialTableCacheWithNotContinuousEntries(
epochs.add(i);
}
tableCache.put(new CacheKey<>(Long.toString(i)),
new CacheValue<>(Optional.of(Long.toString(i)), i));
CacheValue.get(i, Long.toString(i)));
totalCount++;
}

Expand Down Expand Up @@ -206,20 +205,20 @@ public void testPartialTableCacheWithOverrideEntries(
createTableCache(cacheType);

tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 0));
CacheValue.get(0, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.of(Long.toString(1)), 1));
CacheValue.get(1, Long.toString(1)));
tableCache.put(new CacheKey<>(Long.toString(2)),
new CacheValue<>(Optional.of(Long.toString(2)), 2));
CacheValue.get(2, Long.toString(2)));


//Override first 2 entries
// This is to simulate a case like create mpu key, commit part1, commit
// part2. They override the same key.
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 3));
CacheValue.get(3, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.of(Long.toString(1)), 4));
CacheValue.get(4, Long.toString(1)));



Expand Down Expand Up @@ -249,7 +248,7 @@ public void testPartialTableCacheWithOverrideEntries(

// Add a new entry.
tableCache.put(new CacheKey<>(Long.toString(5)),
new CacheValue<>(Optional.of(Long.toString(5)), 5));
CacheValue.get(5, Long.toString(5)));

epochs = new ArrayList<>();
epochs.add(5L);
Expand All @@ -273,24 +272,24 @@ public void testPartialTableCacheWithOverrideAndDelete(
createTableCache(cacheType);

tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 0));
CacheValue.get(0, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.of(Long.toString(1)), 1));
CacheValue.get(1, Long.toString(1)));
tableCache.put(new CacheKey<>(Long.toString(2)),
new CacheValue<>(Optional.of(Long.toString(2)), 2));
CacheValue.get(2, Long.toString(2)));


// Override entries
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 3));
CacheValue.get(3, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.of(Long.toString(1)), 4));
CacheValue.get(4, Long.toString(1)));

// Finally, mark them as deleted
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.absent(), 5));
CacheValue.get(5));
tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.absent(), 6));
CacheValue.get(6));

// So now our cache epoch entries looks like
// 0-0, 1-1, 2-2, 0-3, 1-4, 0-5, 1-6
Expand Down Expand Up @@ -332,7 +331,7 @@ public void testPartialTableCacheWithOverrideAndDelete(

// Add a new entry, now old override entries will be cleaned up.
tableCache.put(new CacheKey<>(Long.toString(3)),
new CacheValue<>(Optional.of(Long.toString(3)), 7));
CacheValue.get(7, Long.toString(3)));

epochs = new ArrayList<>();
epochs.add(7L);
Expand Down Expand Up @@ -451,14 +450,14 @@ public void testTableCache(TableCache.CacheType cacheType) {
// In non-HA, epoch entries might be out of order.
// Scenario is like create vol, set vol, set vol, delete vol
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 0));
CacheValue.get(0, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(1)), 1));
CacheValue.get(1, Long.toString(1)));
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(2)), 3));
CacheValue.get(3, Long.toString(2)));

tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.absent(), 2));
CacheValue.get(2));

List<Long> epochs = new ArrayList<>();
epochs.add(0L);
Expand All @@ -484,17 +483,17 @@ public void testTableCacheWithNonConsecutiveEpochList(

// In non-HA, epoch entries might be out of order.
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(0)), 0));
CacheValue.get(0, Long.toString(0)));
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(1)), 1));
CacheValue.get(1, Long.toString(1)));
tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(3)), 3));
CacheValue.get(3, Long.toString(3)));

tableCache.put(new CacheKey<>(Long.toString(0)),
new CacheValue<>(Optional.of(Long.toString(2)), 2));
CacheValue.get(2, Long.toString(2)));

tableCache.put(new CacheKey<>(Long.toString(1)),
new CacheValue<>(Optional.of(Long.toString(1)), 4));
CacheValue.get(4, Long.toString(1)));

List<Long> epochs = new ArrayList<>();
epochs.add(0L);
Expand Down Expand Up @@ -548,9 +547,9 @@ public void testTableCacheStats(TableCache.CacheType cacheType) {
createTableCache(cacheType);

tableCache.put(new CacheKey<>("0"),
new CacheValue<>(Optional.of("0"), 0));
CacheValue.get(0, "0"));
tableCache.put(new CacheKey<>("1"),
new CacheValue<>(Optional.of("1"), 1));
CacheValue.get(1, "1"));

Assertions.assertNotNull(tableCache.get(new CacheKey<>("0")));
Assertions.assertNotNull(tableCache.get(new CacheKey<>("0")));
Expand All @@ -569,7 +568,7 @@ private int writeToCache(int count, int startVal, long sleep)
int counter = 1;
while (counter <= count) {
tableCache.put(new CacheKey<>(Integer.toString(startVal)),
new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
CacheValue.get(startVal, Integer.toString(startVal)));
startVal++;
counter++;
Thread.sleep(sleep);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@
import org.apache.ozone.test.LambdaTestUtils;
import org.apache.hadoop.util.Time;

import com.google.common.base.Optional;
import com.google.common.collect.Sets;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
Expand Down Expand Up @@ -1004,7 +1003,7 @@ public void testListStatusWithTableCache() throws Exception {
.getOzoneKey(VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i);
metadataManager.getKeyTable(getDefaultBucketLayout())
.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.absent(), 2L));
CacheValue.get(2L));
}
}

Expand Down Expand Up @@ -1074,7 +1073,7 @@ public void testListStatusWithTableCacheRecursive() throws Exception {
keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i);
metadataManager.getKeyTable(getDefaultBucketLayout())
.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.absent(), 2L));
CacheValue.get(2L));
}
}

Expand Down Expand Up @@ -1102,7 +1101,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception {
// Mark as deleted in cache.
metadataManager.getKeyTable(getDefaultBucketLayout())
.addCacheEntry(new CacheKey<>(key),
new CacheValue<>(Optional.absent(), 2L));
CacheValue.get(2L));
deletedKeySet.add(key);
}
}
Expand Down Expand Up @@ -1138,7 +1137,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception {
metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
metadataManager.getKeyTable(getDefaultBucketLayout())
.addCacheEntry(new CacheKey<>(ozoneKey),
new CacheValue<>(Optional.absent(), 2L));
CacheValue.get(2L));
deletedKeySet.add(key);
}
doDelete = !doDelete;
Expand Down Expand Up @@ -1187,7 +1186,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception {
metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
metadataManager.getKeyTable(getDefaultBucketLayout())
.addCacheEntry(new CacheKey<>(ozoneKey),
new CacheValue<>(Optional.absent(), 2L));
CacheValue.get(2L));
deletedKeySet.add(key);
}
// Update existKeySet
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
*/
package org.apache.hadoop.ozone.om.ratis;

import com.google.common.base.Optional;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
Expand Down Expand Up @@ -64,15 +63,14 @@ public void testRequestWithNonExistentBucket()
String bucketName = "invalidBuck";

// Add entry to Volume Table.
OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
.setVolume(volumeName)
.setOwnerName("owner")
.setAdminName("admin")
.build();
omMetadataManager.getVolumeTable().addCacheEntry(
new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)),
new CacheValue<>(
Optional.of(
OmVolumeArgs.newBuilder()
.setVolume(volumeName)
.setOwnerName("owner")
.setAdminName("admin")
.build()), 100L));
CacheValue.get(100L, omVolumeArgs));

OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils
.createCompleteMPURequest(volumeName, bucketName, "mpuKey", "mpuKeyID",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.google.common.base.Optional;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.TableCacheMetrics;
Expand Down Expand Up @@ -1601,13 +1600,13 @@ public void revokeSecret(String kerberosId) throws IOException {
@Override
public void put(String kerberosId, S3SecretValue secretValue, long txId) {
s3SecretTable.addCacheEntry(new CacheKey<>(kerberosId),
new CacheValue<>(Optional.of(secretValue), txId));
CacheValue.get(txId, secretValue));
}

@Override
public void invalidate(String id, long txId) {
s3SecretTable.addCacheEntry(new CacheKey<>(id),
new CacheValue<>(Optional.absent(), txId));
CacheValue.get(txId));
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4215,10 +4215,10 @@ private void addS3GVolumeToDB() throws IOException {
// Add to cache.
metadataManager.getVolumeTable().addCacheEntry(
new CacheKey<>(dbVolumeKey),
new CacheValue<>(Optional.of(omVolumeArgs), transactionID));
CacheValue.get(transactionID, omVolumeArgs));
metadataManager.getUserTable().addCacheEntry(
new CacheKey<>(dbUserKey),
new CacheValue<>(Optional.of(userVolumeInfo), transactionID));
CacheValue.get(transactionID, userVolumeInfo));
LOG.info("Created Volume {} With Owner {} required for S3Gateway " +
"operations.", s3VolumeName, userName);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

package org.apache.hadoop.ozone.om.request.bucket;

import com.google.common.base.Optional;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
Expand Down Expand Up @@ -230,9 +229,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

// Update table cache.
metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey),
new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
CacheValue.get(transactionLogIndex, omVolumeArgs));
metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
CacheValue.get(transactionLogIndex, omBucketInfo));

omResponse.setCreateBucketResponse(
CreateBucketResponse.newBuilder().build());
Expand Down

0 comments on commit e8fbdaa

Please sign in to comment.