From 3479a57354738aa50f58bdd885ce45ea4487b083 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Mon, 6 May 2019 12:11:52 -0700 Subject: [PATCH 1/8] HDDS-1499. OzoneManager Cache. --- .../org/apache/hadoop/utils/db/DBStore.java | 6 +- .../org/apache/hadoop/utils/db/RDBStore.java | 6 +- .../org/apache/hadoop/utils/db/Table.java | 29 +++- .../apache/hadoop/utils/db/TypedTable.java | 88 ++++++++++- .../hadoop/utils/db/cache/CacheKey.java | 56 +++++++ .../hadoop/utils/db/cache/CacheValue.java | 63 ++++++++ .../hadoop/utils/db/cache/EpochEntry.java | 74 +++++++++ .../hadoop/utils/db/cache/FullTableCache.java | 66 ++++++++ .../utils/db/cache/PartialTableCache.java | 96 ++++++++++++ .../hadoop/utils/db/cache/TableCache.java | 72 +++++++++ .../hadoop/utils/db/cache/package-info.java | 18 +++ .../utils/db/TestTypedRDBTableStore.java | 77 +++++++++- .../utils/db/cache/TestFullTableCache.java | 127 ++++++++++++++++ .../utils/db/cache/TestPartialTableCache.java | 143 ++++++++++++++++++ .../hadoop/utils/db/cache/package-info.java | 22 +++ .../scm/metadata/SCMMetadataStoreRDBImpl.java | 10 +- .../ozone/om/OmMetadataManagerImpl.java | 34 +++-- .../impl/ContainerDBServiceProviderImpl.java | 6 +- 18 files changed, 967 insertions(+), 26 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/package-info.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java index 56166ab9ffc8d..d1425e2b8b37d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.utils.db.cache.TableCache; /** * The DBStore interface provides the ability to create Tables, which store @@ -44,17 +45,20 @@ public interface DBStore extends AutoCloseable { */ Table getTable(String name) throws IOException; + /** * Gets an existing TableStore with implicit key/value conversion. * * @param name - Name of the TableStore to get * @param keyType * @param valueType + * @param cachetype - Type of cache need to be used for this table. * @return - TableStore. * @throws IOException on Failure */ Table getTable(String name, - Class keyType, Class valueType) throws IOException; + Class keyType, Class valueType, + TableCache.CACHETYPE cachetype) throws IOException; /** * Lists the Known list of Tables in a DB. 
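
The getTable overload above makes the cache policy an explicit per-table choice
at table-creation time. A minimal caller-side sketch (the store handle and the
literal table names are illustrative, not part of this patch):

    // A small, frequently read table whose full contents can stay in memory.
    Table<String, OmVolumeArgs> volumeTable =
        store.getTable("volumeTable", String.class, OmVolumeArgs.class,
            TableCache.CACHETYPE.FULLCACHE);

    // A table that should behave exactly as before, with no cache at all.
    Table<BigInteger, X509Certificate> validCertsTable =
        store.getTable("validCerts", BigInteger.class, X509Certificate.class,
            TableCache.CACHETYPE.NOCACHE);

The OmMetadataManagerImpl and SCMMetadataStoreRDBImpl hunks later in this patch
show the real call sites choosing FULLCACHE, PARTIALCACHE, or NOCACHE per table.
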
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index 5bb0fa41399ba..ba99d5ab3da1f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -38,6 +38,7 @@ import org.apache.hadoop.utils.RocksDBStoreMBean; import com.google.common.base.Preconditions; +import org.apache.hadoop.utils.db.cache.TableCache; import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -258,9 +259,10 @@ public Table getTable(String name) throws IOException { @Override public Table getTable(String name, - Class keyType, Class valueType) throws IOException { + Class keyType, Class valueType, + TableCache.CACHETYPE cachetype) throws IOException { return new TypedTable(getTable(name), codecRegistry, keyType, - valueType); + valueType, cachetype); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java index 2f14e778ec1fd..4eb8dbb36bdf8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java @@ -21,8 +21,10 @@ import java.io.IOException; +import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.classification.InterfaceStability; - +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; /** * Interface for key-value store that stores ozone metadata. Ozone metadata is * stored as key value pairs, both key and value are arbitrary byte arrays. Each @@ -60,6 +62,9 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value) * Returns the value mapped to the given key in byte array or returns null * if the key is not found. * + * First it will check from cache, if it has entry return the value + * otherwise, get from the RocksDB table. + * * @param key metadata key * @return value in byte array or null if the key is not found. * @throws IOException on Failure @@ -97,6 +102,28 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value) */ String getName() throws IOException; + /** + * Add entry to the table cache. + * + * If the cacheKey already exists, it will override the entry. + * @param cacheKey + * @param cacheValue + */ + default void addCacheEntry(CacheKey cacheKey, + CacheValue cacheValue) { + throw new NotImplementedException("addCacheEntry is not implemented"); + } + + /** + * Removes all the entries from the table cache which are having epoch value + * less + * than or equal to specified epoch value. + * @param epoch + */ + default void cleanupCache(long epoch) { + throw new NotImplementedException("cleanupCache is not implemented"); + } + /** * Class used to represent the key and value pair of a db entry. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 667822b91d339..bddbd2dad9fef 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -20,6 +20,13 @@ import java.io.IOException; +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; +import org.apache.hadoop.utils.db.cache.FullTableCache; +import org.apache.hadoop.utils.db.cache.PartialTableCache; +import org.apache.hadoop.utils.db.cache.TableCache; + /** * Strongly typed table implementation. *

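
The hunks below wire an optional TableCache into TypedTable. The intended
lifecycle, sketched against the API added by this patch (the key, value, and
log-index variables are assumed context, not part of this change):

    // 1. While applying a transaction, publish the result to the table
    //    cache, using the Ratis transaction log index as the epoch.
    table.addCacheEntry(new CacheKey<>(key),
        new CacheValue<>(value, CacheValue.OperationType.CREATED, logIndex));

    // 2. Reads consult the cache first; an entry whose last operation is
    //    DELETED reads back as null even before RocksDB is updated.
    VALUE current = table.get(key);

    // 3. Once everything up to this epoch has been flushed to RocksDB, the
    //    entries can be evicted: a no-op for FULLCACHE tables, an async
    //    background task for PARTIALCACHE tables.
    table.cleanupCache(logIndex);
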
@@ -31,22 +38,40 @@ */ public class TypedTable implements Table { - private Table rawTable; + private final Table rawTable; + + private final CodecRegistry codecRegistry; - private CodecRegistry codecRegistry; + private final Class keyType; - private Class keyType; + private final Class valueType; - private Class valueType; + private final TableCache, CacheValue> cache; public TypedTable( Table rawTable, CodecRegistry codecRegistry, Class keyType, Class valueType) { + this(rawTable, codecRegistry, keyType, valueType, + null); + } + + + public TypedTable( + Table rawTable, + CodecRegistry codecRegistry, Class keyType, + Class valueType, TableCache.CACHETYPE cachetype) { this.rawTable = rawTable; this.codecRegistry = codecRegistry; this.keyType = keyType; this.valueType = valueType; + if (cachetype == TableCache.CACHETYPE.FULLCACHE) { + cache = new FullTableCache<>(); + } else if (cachetype == TableCache.CACHETYPE.PARTIALCACHE) { + cache = new PartialTableCache<>(); + } else { + cache = null; + } } @Override @@ -71,6 +96,27 @@ public boolean isEmpty() throws IOException { @Override public VALUE get(KEY key) throws IOException { + // Here the metadata lock will guarantee that cache is not updated for same + // key during get key. + if (cache != null) { + CacheValue cacheValue = cache.get(new CacheKey<>(key)); + if (cacheValue == null) { + return getFromTable(key); + } else { + // Doing this because, if the Cache Value Last operation is deleted + // means it will eventually removed from DB. So, we should return null. + if (cacheValue.getLastOperation() != CacheValue.OperationType.DELETED) { + return cacheValue.getValue(); + } else { + return null; + } + } + } else { + return getFromTable(key); + } + } + + private VALUE getFromTable(KEY key) throws IOException { byte[] keyBytes = codecRegistry.asRawData(key); byte[] valueBytes = rawTable.get(keyBytes); return codecRegistry.asObject(valueBytes, valueType); @@ -106,6 +152,40 @@ public void close() throws Exception { } + @Override + public void addCacheEntry(CacheKey cacheKey, + CacheValue cacheValue) { + // This will override the entry if there is already entry for this key. + cache.put(cacheKey, cacheValue); + } + + + @Override + public void cleanupCache(long epoch) { + cache.cleanup(epoch); + } + + @VisibleForTesting + TableCache, CacheValue> getCache() { + return cache; + } + + public Table getRawTable() { + return rawTable; + } + + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + public Class getKeyType() { + return keyType; + } + + public Class getValueType() { + return valueType; + } + /** * Key value implementation for strongly typed tables. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java new file mode 100644 index 0000000000000..f928e4775a546 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheKey.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.utils.db.cache; + +import java.util.Objects; + +/** + * CacheKey for the RocksDB table. + * @param + */ +public class CacheKey { + + private final KEY key; + + public CacheKey(KEY key) { + Objects.requireNonNull(key, "Key Should not be null in CacheKey"); + this.key = key; + } + + public KEY getKey() { + return key; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CacheKey cacheKey = (CacheKey) o; + return Objects.equals(key, cacheKey.key); + } + + @Override + public int hashCode() { + return Objects.hash(key); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java new file mode 100644 index 0000000000000..68047a5bcf632 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.utils.db.cache; + +import java.util.Objects; + +/** + * CacheValue for the RocksDB Table. + * @param + */ +public class CacheValue { + + private VALUE value; + private OperationType lastOperation; + // This value is used for evict entries from cache. + // This value is set with ratis transaction context log entry index. + private long epoch; + + public CacheValue(VALUE value, OperationType lastOperation, long epoch) { + Objects.requireNonNull(value, "Value Should not be null in CacheValue"); + this.value = value; + this.lastOperation = lastOperation; + this.epoch = epoch; + } + + public VALUE getValue() { + return value; + } + + public OperationType getLastOperation() { + return lastOperation; + } + + public long getEpoch() { + return epoch; + } + + /** + * Last happened Operation. 
+   */
+  public enum OperationType {
+    CREATED,
+    UPDATED,
+    DELETED
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java
new file mode 100644
index 0000000000000..6966b3d92d61c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/EpochEntry.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db.cache;
+
+import java.util.Objects;
+
+/**
+ * Describes an epoch entry: the epoch at which a cache key was added. It is
+ * used when evicting entries from the partial table cache.
+ * @param <CACHEKEY> cache key type.
+ */
+public class EpochEntry<CACHEKEY> implements
+    Comparable<EpochEntry<CACHEKEY>> {
+
+  private long epoch;
+  private CACHEKEY cachekey;
+
+  EpochEntry(long epoch, CACHEKEY cachekey) {
+    this.epoch = epoch;
+    this.cachekey = cachekey;
+  }
+
+  public long getEpoch() {
+    return epoch;
+  }
+
+  public CACHEKEY getCachekey() {
+    return cachekey;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    EpochEntry<?> that = (EpochEntry<?>) o;
+    // Compare keys with equals() so that equals() stays consistent with
+    // hashCode(), which also hashes the key.
+    return epoch == that.epoch && Objects.equals(cachekey, that.cachekey);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(epoch, cachekey);
+  }
+
+  @Override
+  public int compareTo(EpochEntry<CACHEKEY> other) {
+    return Long.compare(this.epoch, other.epoch);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java
new file mode 100644
index 0000000000000..2406fc1680ed5
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.utils.db.cache; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * This is the full table cache, where it uses concurrentHashMap internally, + * and does not do any evict or cleanup. This full table cache need to be + * used by tables where we want to cache the entire table with out any + * cleanup to the cache + * @param + * @param + */ + +@Private +@Evolving +public class FullTableCache + implements TableCache { + + private final ConcurrentHashMap cache; + + public FullTableCache() { + cache = new ConcurrentHashMap<>(); + } + + @Override + public CACHEVALUE get(CACHEKEY cacheKey) { + return cache.get(cacheKey); + } + + + @Override + public void put(CACHEKEY cacheKey, CACHEVALUE value) { + cache.put(cacheKey, value); + } + + @Override + public void cleanup(long epoch) { + // Do nothing + } + + @Override + public int size() { + return cache.size(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java new file mode 100644 index 0000000000000..6f59c62263917 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.utils.db.cache; + +import java.util.Iterator; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + + + +/** + * This is used for the tables where we don't want to cache entire table in + * in-memory. + */ +@Private +@Evolving +public class PartialTableCache + implements TableCache{ + + private final ConcurrentHashMap cache; + private final TreeSet> epochEntries; + private ExecutorService executorService; + + + + public PartialTableCache() { + cache = new ConcurrentHashMap<>(); + epochEntries = new TreeSet>(); + // Created a singleThreadExecutor, so one cleanup will be running at a + // time. 
+ executorService = Executors.newSingleThreadExecutor(); + } + + @Override + public CACHEVALUE get(CACHEKEY cachekey) { + return cache.get(cachekey); + } + + @Override + public void put(CACHEKEY cacheKey, CACHEVALUE value) { + cache.put(cacheKey, value); + CacheValue cacheValue = (CacheValue) cache.get(cacheKey); + epochEntries.add(new EpochEntry<>(cacheValue.getEpoch(), cacheKey)); + } + + @Override + public void cleanup(long epoch) { + executorService.submit(() -> evictCache(epoch)); + } + + @Override + public int size() { + return cache.size(); + } + + private void evictCache(long epoch) { + EpochEntry currentEntry = null; + for (Iterator iterator = epochEntries.iterator(); iterator.hasNext();) { + currentEntry = (EpochEntry) iterator.next(); + CACHEKEY cachekey = currentEntry.getCachekey(); + CacheValue cacheValue = (CacheValue) cache.get(cachekey); + if (cacheValue.getEpoch() <= epoch) { + cache.remove(cachekey); + iterator.remove(); + } + + // If currentEntry epoch is greater than epoch, we have deleted all + // entries less than specified epoch. So, we can break. + if (currentEntry.getEpoch() > epoch) { + break; + } + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java new file mode 100644 index 0000000000000..30d53c6d6d26f --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.utils.db.cache; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + +/** + * Cache used for RocksDB tables. + * @param + * @param + */ + +@Private +@Evolving +public interface TableCache { + + /** + * Return the value for the key if it is present, otherwise return null. + * @param cacheKey + * @return CACHEVALUE + */ + CACHEVALUE get(CACHEKEY cacheKey); + + /** + * Add an entry to the cache, if the key already exists it overrides. + * @param cacheKey + * @param value + */ + void put(CACHEKEY cacheKey, CACHEVALUE value); + + /** + * Removes all the entries from the cache which are having epoch value less + * than or equal to specified epoch value. + * @param epoch + */ + void cleanup(long epoch); + + /** + * Return the size of the cache. + * @return size + */ + int size(); + + /** + * Defines type of cache need to be used by OM RocksDB tables. + */ + enum CACHETYPE { + FULLCACHE, // Table uses full cache, no cleanup will be happening for the + // cache entries. + PARTIALCACHE, // Table uses partial cache, when cleanup is called it will + // evict the entries from the cache. 
+ NOCACHE // No cache used for the table. + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/package-info.java new file mode 100644 index 0000000000000..8d2506a9bfee3 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/package-info.java @@ -0,0 +1,18 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.utils.db.cache; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java index 4d3b1bf79c870..f62829837718b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java @@ -27,9 +27,13 @@ import java.util.Set; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.db.Table.KeyValue; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.utils.db.cache.CacheKey; +import org.apache.hadoop.utils.db.cache.CacheValue; +import org.apache.hadoop.utils.db.cache.TableCache; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -51,7 +55,7 @@ public class TestTypedRDBTableStore { Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), "First", "Second", "Third", "Fourth", "Fifth", - "Sixth"); + "Sixth", "Seven"); @Rule public TemporaryFolder folder = new TemporaryFolder(); private RDBStore rdbStore = null; @@ -115,6 +119,15 @@ private Table createTypedTable(String name) String.class, String.class); } + private TypedTable createTypedTableWithCache(String name, + TableCache.CACHETYPE cachetype) + throws IOException { + return new TypedTable( + rdbStore.getTable(name), + codecRegistry, + String.class, String.class, cachetype); + } + @Test public void delete() throws Exception { List deletedKeys = new LinkedList<>(); @@ -236,4 +249,66 @@ public void forEachAndIterator() throws Exception { } } } + + @Test + public void testTypedTableWithCache() throws Exception { + int iterCount = 10; + try (Table testTable = createTypedTableWithCache( + "Seven", TableCache.CACHETYPE.FULLCACHE)) { + + for (int x = 0; x < iterCount; x++) { + String key = Integer.toString(x); + String value = Integer.toString(x); + testTable.addCacheEntry(new CacheKey<>(key), new CacheValue<>(value, + CacheValue.OperationType.CREATED, x)); + } + + // As we have added to cache, so get should return value even if it + // does not exist in DB. 
+ for (int x = 0; x < iterCount; x++) { + Assert.assertEquals(Integer.toString(1), + testTable.get(Integer.toString(1))); + } + + } + } + + @Test + public void testTypedTableWithCacheWithFewDeletedOperationType() + throws Exception { + int iterCount = 10; + try (Table testTable = createTypedTableWithCache( + "Seven", TableCache.CACHETYPE.PARTIALCACHE)) { + + for (int x = 0; x < iterCount; x++) { + String key = Integer.toString(x); + String value = Integer.toString(x); + if (x % 2 == 0) { + testTable.addCacheEntry(new CacheKey<>(key), + new CacheValue<>(value, + CacheValue.OperationType.CREATED, x)); + } else { + testTable.addCacheEntry(new CacheKey<>(key), new CacheValue<>(value, + CacheValue.OperationType.DELETED, x)); + } + } + + // As we have added to cache, so get should return value even if it + // does not exist in DB. + for (int x = 0; x < iterCount; x++) { + if (x % 2 == 0) { + Assert.assertEquals(Integer.toString(x), + testTable.get(Integer.toString(x))); + } else { + Assert.assertNull(testTable.get(Integer.toString(x))); + } + } + + testTable.cleanupCache(5); + + GenericTestUtils.waitFor(() -> + ((TypedTable) testTable).getCache().size() == 4, + 100, 5000); + } + } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java new file mode 100644 index 0000000000000..836c3ac4f0c43 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.utils.db.cache; + +import java.util.concurrent.CompletableFuture; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.fail; + +/** + * Class tests FullTable cache. + */ +public class TestFullTableCache { + + private TableCache, CacheValue> tableCache; + + @Before + public void create() { + tableCache = new FullTableCache<>(); + } + + + @Test + public void testFullTableCache() { + tableCache = new FullTableCache<>(); + + + for (int i = 0; i< 10; i++) { + tableCache.put(new CacheKey<>(Integer.toString(i)), + new CacheValue<>(Integer.toString(i), + CacheValue.OperationType.CREATED, i)); + } + + + for (int i=0; i < 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + + // On a full table cache if some one calls cleanup it is a no-op. 
+ tableCache.cleanup(10); + + for (int i=0; i < 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + } + + @Test + public void testFullTableCacheParallel() throws Exception { + + int totalCount = 0; + CompletableFuture future = + CompletableFuture.supplyAsync(() -> { + try { + return writeToCache(10, 0, 0); + } catch (InterruptedException ex) { + fail("writeToCache got interrupt exception"); + } + return 0; + }); + int value = future.get(); + Assert.assertEquals(10, value); + + totalCount += value; + + future = + CompletableFuture.supplyAsync(() -> { + try { + return writeToCache(10, 10, 100); + } catch (InterruptedException ex) { + fail("writeToCache got interrupt exception"); + } + return 0; + }); + + for (int i=0; i < 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + + value = future.get(); + Assert.assertEquals(10, value); + + totalCount += value; + Assert.assertEquals(totalCount, tableCache.size()); + + for (int i=0; i < 20; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + } + + private int writeToCache(int count, int startVal, long sleep) + throws InterruptedException { + int counter = 0; + while (counter < count){ + tableCache.put(new CacheKey<>(Integer.toString(startVal)), + new CacheValue<>(Integer.toString(startVal), + CacheValue.OperationType.CREATED, startVal)); + startVal++; + counter++; + Thread.sleep(sleep); + } + return count; + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java new file mode 100644 index 0000000000000..0f5fb94259088 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.utils.db.cache; + +import java.util.concurrent.CompletableFuture; + +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.fail; + +/** + * Class tests partial table cache. 
+ */ +public class TestPartialTableCache { + private TableCache, CacheValue> tableCache; + + @Before + public void create() { + tableCache = new PartialTableCache<>(); + } + @Test + public void testPartialTableCache() { + + + for (int i = 0; i< 10; i++) { + tableCache.put(new CacheKey<>(Integer.toString(i)), + new CacheValue<>(Integer.toString(i), + CacheValue.OperationType.CREATED, i)); + } + + + for (int i=0; i < 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + + // On a full table cache if some one calls cleanup it is a no-op. + tableCache.cleanup(4); + + for (int i=5; i < 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + } + + + @Test + public void testPartialTableCacheParallel() throws Exception { + + int totalCount = 0; + CompletableFuture future = + CompletableFuture.supplyAsync(() -> { + try { + return writeToCache(10, 1, 0); + } catch (InterruptedException ex) { + fail("writeToCache got interrupt exception"); + } + return 0; + }); + int value = future.get(); + Assert.assertEquals(10, value); + + totalCount += value; + + future = + CompletableFuture.supplyAsync(() -> { + try { + return writeToCache(10, 11, 100); + } catch (InterruptedException ex) { + fail("writeToCache got interrupt exception"); + } + return 0; + }); + + // Check we have first 10 entries in cache. + for (int i=1; i <= 10; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + + int deleted = 5; + // cleanup first 5 entires + tableCache.cleanup(deleted); + + value = future.get(); + Assert.assertEquals(10, value); + + totalCount += value; + + // We should totalCount - deleted entries in cache. + final int tc = totalCount; + GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100, + 5000); + + // Check if we have remaining entries. + for (int i=6; i <= totalCount; i++) { + Assert.assertEquals(Integer.toString(i), + tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); + } + + tableCache.cleanup(10); + + tableCache.cleanup(totalCount); + + // Cleaned up all entries, so cache size should be zero. + GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100, + 5000); + } + + private int writeToCache(int count, int startVal, long sleep) + throws InterruptedException { + int counter = 1; + while (counter <= count){ + tableCache.put(new CacheKey<>(Integer.toString(startVal)), + new CacheValue<>(Integer.toString(startVal), + CacheValue.OperationType.CREATED, startVal)); + startVal++; + counter++; + Thread.sleep(sleep); + } + return count; + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/package-info.java new file mode 100644 index 0000000000000..b46cf614e8a88 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * Tests for the DB Cache Utilities. + */ +package org.apache.hadoop.utils.db.cache; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java index fde754c9acdd4..25be9c93929a7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.utils.db.TableIterator; +import org.apache.hadoop.utils.db.cache.TableCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,15 +105,18 @@ public void start(OzoneConfiguration config) .build(); deletedBlocksTable = this.store.getTable(DELETED_BLOCKS_TABLE, - Long.class, DeletedBlocksTransaction.class); + Long.class, DeletedBlocksTransaction.class, + TableCache.CACHETYPE.NOCACHE); checkTableStatus(deletedBlocksTable, DELETED_BLOCKS_TABLE); validCertsTable = this.store.getTable(VALID_CERTS_TABLE, - BigInteger.class, X509Certificate.class); + BigInteger.class, X509Certificate.class, + TableCache.CACHETYPE.NOCACHE); checkTableStatus(validCertsTable, VALID_CERTS_TABLE); revokedCertsTable = this.store.getTable(REVOKED_CERTS_TABLE, - BigInteger.class, X509Certificate.class); + BigInteger.class, X509Certificate.class, + TableCache.CACHETYPE.NOCACHE); checkTableStatus(revokedCertsTable, REVOKED_CERTS_TABLE); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 793af665db68e..cf35e5b78a1fd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -59,6 +59,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +import org.apache.hadoop.utils.db.cache.TableCache; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -245,42 +247,50 @@ protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { */ protected void initializeOmTables() throws IOException { userTable = - this.store.getTable(USER_TABLE, String.class, VolumeList.class); + this.store.getTable(USER_TABLE, String.class, VolumeList.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(userTable, USER_TABLE); - this.store.getTable(VOLUME_TABLE, String.class, - String.class); volumeTable = - this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class); + this.store.getTable(VOLUME_TABLE, String.class, 
OmVolumeArgs.class, + TableCache.CACHETYPE.FULLCACHE); checkTableStatus(volumeTable, VOLUME_TABLE); bucketTable = - this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class); + this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class, + TableCache.CACHETYPE.FULLCACHE); + checkTableStatus(bucketTable, BUCKET_TABLE); - keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class); + keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(keyTable, KEY_TABLE); deletedTable = - this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class); + this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class, + TableCache.CACHETYPE.NOCACHE); checkTableStatus(deletedTable, DELETED_TABLE); openKeyTable = - this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class); + this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(openKeyTable, OPEN_KEY_TABLE); - s3Table = this.store.getTable(S3_TABLE, String.class, String.class); + s3Table = this.store.getTable(S3_TABLE, String.class, String.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(s3Table, S3_TABLE); multipartInfoTable = this.store.getTable(MULTIPARTINFO_TABLE, - String.class, OmMultipartKeyInfo.class); + String.class, OmMultipartKeyInfo.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(multipartInfoTable, MULTIPARTINFO_TABLE); dTokenTable = this.store.getTable(DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, Long.class); + OzoneTokenIdentifier.class, Long.class, + TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(dTokenTable, DELEGATION_TOKEN_TABLE); s3SecretTable = this.store.getTable(S3_SECRET_TABLE, String.class, - S3SecretValue.class); + S3SecretValue.class, TableCache.CACHETYPE.PARTIALCACHE); checkTableStatus(s3SecretTable, S3_SECRET_TABLE); } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index 3a20e82c9dc67..60e309c0ca133 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.utils.db.Table; import org.apache.hadoop.utils.db.Table.KeyValue; import org.apache.hadoop.utils.db.TableIterator; +import org.apache.hadoop.utils.db.cache.TableCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +65,8 @@ public class ContainerDBServiceProviderImpl public ContainerDBServiceProviderImpl(DBStore dbStore) { try { this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class); + ContainerKeyPrefix.class, Integer.class, + TableCache.CACHETYPE.NOCACHE); } catch (IOException e) { LOG.error("Unable to create Container Key Table. 
" + e); } @@ -85,7 +87,7 @@ public void initNewContainerDB(Map File oldDBLocation = containerDbStore.getDbLocation(); containerDbStore = ReconContainerDBProvider.getNewDBStore(configuration); containerKeyTable = containerDbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class); + ContainerKeyPrefix.class, Integer.class, TableCache.CACHETYPE.NOCACHE); if (oldDBLocation.exists()) { LOG.info("Cleaning up old Recon Container DB at {}.", From 2aa8432579ad3e1c5d953ad07247e8210005ff25 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 8 May 2019 21:58:15 -0700 Subject: [PATCH 2/8] fix review comments --- .../main/java/org/apache/hadoop/utils/db/Table.java | 2 -- .../java/org/apache/hadoop/utils/db/TypedTable.java | 11 +++++++++++ .../hadoop/utils/db/cache/PartialTableCache.java | 8 +++----- .../hadoop/utils/db/TestTypedRDBTableStore.java | 13 +++++++++++++ 4 files changed, 27 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java index 4eb8dbb36bdf8..70f340fdd2203 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java @@ -62,8 +62,6 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value) * Returns the value mapped to the given key in byte array or returns null * if the key is not found. * - * First it will check from cache, if it has entry return the value - * otherwise, get from the RocksDB table. * * @param key metadata key * @return value in byte array or null if the key is not found. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index bddbd2dad9fef..ff6e94995cc33 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -94,6 +94,17 @@ public boolean isEmpty() throws IOException { return rawTable.isEmpty(); } + /** + * Returns the value mapped to the given key in byte array or returns null + * if the key is not found. + * + * First it will check from cache, if it has entry return the value + * otherwise, get from the RocksDB table. + * + * @param key metadata key + * @return VALUE + * @throws IOException + */ @Override public VALUE get(KEY key) throws IOException { // Here the metadata lock will guarantee that cache is not updated for same diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index 6f59c62263917..b10c81fe9e9b7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -84,11 +84,9 @@ private void evictCache(long epoch) { if (cacheValue.getEpoch() <= epoch) { cache.remove(cachekey); iterator.remove(); - } - - // If currentEntry epoch is greater than epoch, we have deleted all - // entries less than specified epoch. So, we can break. - if (currentEntry.getEpoch() > epoch) { + } else { + // If currentEntry epoch is greater than epoch, we have deleted all + // entries less than specified epoch. So, we can break. 
break; } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java index f62829837718b..e9748d284fe31 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java @@ -309,6 +309,19 @@ public void testTypedTableWithCacheWithFewDeletedOperationType() GenericTestUtils.waitFor(() -> ((TypedTable) testTable).getCache().size() == 4, 100, 5000); + + + //Check remaining values + for (int x = 6; x < iterCount; x++) { + if (x % 2 == 0) { + Assert.assertEquals(Integer.toString(x), + testTable.get(Integer.toString(x))); + } else { + Assert.assertNull(testTable.get(Integer.toString(x))); + } + } + + } } } From b6b559599a0783082f059c9300876909d61e8b97 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Thu, 9 May 2019 10:18:56 -0700 Subject: [PATCH 3/8] fix review comments --- .../hadoop/utils/db/cache/FullTableCache.java | 3 ++- .../hadoop/utils/db/cache/PartialTableCache.java | 16 +++++++++------- .../apache/hadoop/utils/db/cache/TableCache.java | 3 ++- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java index 2406fc1680ed5..5b11496f1b27c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java @@ -34,7 +34,8 @@ @Private @Evolving -public class FullTableCache +public class FullTableCache implements TableCache { private final ConcurrentHashMap cache; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index b10c81fe9e9b7..983894f32fd6a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -36,8 +36,9 @@ */ @Private @Evolving -public class PartialTableCache - implements TableCache{ +public class PartialTableCache + implements TableCache { private final ConcurrentHashMap cache; private final TreeSet> epochEntries; @@ -47,7 +48,7 @@ public class PartialTableCache public PartialTableCache() { cache = new ConcurrentHashMap<>(); - epochEntries = new TreeSet>(); + epochEntries = new TreeSet<>(); // Created a singleThreadExecutor, so one cleanup will be running at a // time. 
   executorService = Executors.newSingleThreadExecutor();
   }
 
   @Override
   public CACHEVALUE get(CACHEKEY cachekey) {
@@ -61,7 +62,7 @@ public CACHEVALUE get(CACHEKEY cachekey) {
 
   @Override
   public void put(CACHEKEY cacheKey, CACHEVALUE value) {
     cache.put(cacheKey, value);
-    CacheValue cacheValue = (CacheValue) cache.get(cacheKey);
+    CacheValue cacheValue = cache.get(cacheKey);
     epochEntries.add(new EpochEntry<>(cacheValue.getEpoch(), cacheKey));
   }
 
@@ -77,10 +78,11 @@ public int size() {
 
   private void evictCache(long epoch) {
     EpochEntry currentEntry = null;
-    for (Iterator iterator = epochEntries.iterator(); iterator.hasNext();) {
-      currentEntry = (EpochEntry) iterator.next();
+    for (Iterator<EpochEntry<CACHEKEY>> iterator = epochEntries.iterator();
+        iterator.hasNext();) {
+      currentEntry = iterator.next();
       CACHEKEY cachekey = currentEntry.getCachekey();
-      CacheValue cacheValue = (CacheValue) cache.get(cachekey);
+      CacheValue cacheValue = cache.get(cachekey);
       if (cacheValue.getEpoch() <= epoch) {
         cache.remove(cachekey);
         iterator.remove();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
index 30d53c6d6d26f..e9cf5534995ab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
@@ -30,7 +30,8 @@
 
 @Private
 @Evolving
-public interface TableCache<CACHEKEY, CACHEVALUE> {
+public interface TableCache<CACHEKEY extends CacheKey,
+    CACHEVALUE extends CacheValue> {
 
   /**
    * Return the value for the key if it is present, otherwise return null.

From 7d25984ca336a718acb2f70d8babebd7cc31932c Mon Sep 17 00:00:00 2001
From: Bharat Viswanadham
Date: Wed, 15 May 2019 11:27:25 -0700
Subject: [PATCH 4/8] fix review comments.

---
 .../org/apache/hadoop/utils/db/RDBTable.java  | 10 +++++---
 .../org/apache/hadoop/utils/db/Table.java     |  3 +++
 .../apache/hadoop/utils/db/TypedTable.java    | 25 ++++++++-----------
 .../hadoop/utils/db/cache/CacheValue.java     | 24 +++---------------
 .../hadoop/utils/db/cache/TableCache.java     |  3 ++-
 .../utils/db/TestTypedRDBTableStore.java      | 14 ++++++-----
 .../utils/db/cache/TestFullTableCache.java    |  7 +++---
 .../utils/db/cache/TestPartialTableCache.java |  8 +++---
 8 files changed, 40 insertions(+), 53 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
index 88b0411d3e1cf..7bbe9d91b171b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.rocksdb.ColumnFamilyHandle;
 
@@ -33,9 +34,12 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * RocksDB implementation of ozone metadata store.
+ * RocksDB implementation of ozone metadata store. This class should only be
+ * used by TypedTable, as its underlying implementation for accessing the
+ * metadata store content. All other users should access tables through
+ * TypedTable.
  */
-public class RDBTable implements Table<byte[], byte[]> {
+@InterfaceAudience.Private
+class RDBTable implements Table<byte[], byte[]> {
 
 
   private static final Logger LOG =
@@ -52,7 +56,7 @@ public class RDBTable implements Table<byte[], byte[]> {
    * @param handle - ColumnFamily Handle.
    * @param writeOptions - RocksDB write Options.
*/ - public RDBTable(RocksDB db, ColumnFamilyHandle handle, + RDBTable(RocksDB db, ColumnFamilyHandle handle, WriteOptions writeOptions) { this.db = db; this.handle = handle; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java index 70f340fdd2203..34b7fb7d060a7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java @@ -62,6 +62,9 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value) * Returns the value mapped to the given key in byte array or returns null * if the key is not found. * + * Caller's of this method should use synchronization mechanism, when + * accessing, as underlying we first check cache, if it does not exist in + * the cache use RocksDB table. * * @param key metadata key * @return value in byte array or null if the key is not found. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index ff6e94995cc33..6efdd69205495 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -98,8 +98,9 @@ public boolean isEmpty() throws IOException { * Returns the value mapped to the given key in byte array or returns null * if the key is not found. * - * First it will check from cache, if it has entry return the value - * otherwise, get from the RocksDB table. + * Caller's of this method should use synchronization mechanism, when + * accessing, as underlying we first check cache, if it does not exist in + * the cache use RocksDB table. * * @param key metadata key * @return VALUE @@ -110,21 +111,15 @@ public VALUE get(KEY key) throws IOException { // Here the metadata lock will guarantee that cache is not updated for same // key during get key. if (cache != null) { - CacheValue cacheValue = cache.get(new CacheKey<>(key)); - if (cacheValue == null) { - return getFromTable(key); - } else { - // Doing this because, if the Cache Value Last operation is deleted - // means it will eventually removed from DB. So, we should return null. - if (cacheValue.getLastOperation() != CacheValue.OperationType.DELETED) { - return cacheValue.getValue(); - } else { - return null; - } + CacheValue< VALUE > cacheValue = cache.get(new CacheKey<>(key)); + if (cacheValue != null) { + // We have a value in cache, return the value. + return cacheValue.getValue(); } - } else { - return getFromTable(key); } + // If no cache for the table or if it does not exist in cache get from + // RocksDB table. + return getFromTable(key); } private VALUE getFromTable(KEY key) throws IOException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java index 68047a5bcf632..34f77ae175295 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/CacheValue.java @@ -18,7 +18,7 @@ package org.apache.hadoop.utils.db.cache; -import java.util.Objects; +import com.google.common.base.Optional; /** * CacheValue for the RocksDB Table. 
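
The CacheValue rewrite below replaces the OperationType marker with Guava's
Optional: a delete is now represented by an absent value. A sketch of how call
sites change (the key and epoch variables are illustrative):

    // Before: an explicit operation type on every cache entry.
    new CacheValue<>(value, CacheValue.OperationType.DELETED, epoch);

    // After this patch: presence or absence of the value carries the intent.
    new CacheValue<>(Optional.of(value), epoch);   // create / update
    new CacheValue<>(Optional.absent(), epoch);    // delete marker

Because getValue() returns value.orNull(), TypedTable.get() no longer needs a
special DELETED branch: a cached delete naturally reads back as null.
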
@@ -26,38 +26,22 @@
  */
 public class CacheValue<VALUE> {
 
-  private VALUE value;
-  private OperationType lastOperation;
+  private Optional<VALUE> value;
   // This value is used for evict entries from cache.
   // This value is set with ratis transaction context log entry index.
   private long epoch;
 
-  public CacheValue(VALUE value, OperationType lastOperation, long epoch) {
-    Objects.requireNonNull(value, "Value Should not be null in CacheValue");
+  public CacheValue(Optional<VALUE> value, long epoch) {
     this.value = value;
-    this.lastOperation = lastOperation;
     this.epoch = epoch;
   }
 
   public VALUE getValue() {
-    return value;
-  }
-
-  public OperationType getLastOperation() {
-    return lastOperation;
+    return value.orNull();
   }
 
   public long getEpoch() {
     return epoch;
   }
 
-  /**
-   * Last happened Operation.
-   */
-  public enum OperationType {
-    CREATED,
-    UPDATED,
-    DELETED
-  }
-
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
index e9cf5534995ab..cb0b9aa05a81e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
@@ -49,7 +49,8 @@ public interface TableCache<CACHEKEY extends CacheKey,
[...]
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java
[...]
-      testTable.addCacheEntry(new CacheKey<>(key), new CacheValue<>(value,
-          CacheValue.OperationType.CREATED, x));
+      testTable.addCacheEntry(new CacheKey<>(key),
+          new CacheValue<>(Optional.of(value),
+              x));
     }
 
     // As we have added to cache, so get should return value even if it
@@ -285,11 +287,11 @@ public void testTypedTableWithCacheWithFewDeletedOperationType()
       String value = Integer.toString(x);
       if (x % 2 == 0) {
         testTable.addCacheEntry(new CacheKey<>(key),
-            new CacheValue<>(value,
-                CacheValue.OperationType.CREATED, x));
+            new CacheValue<>(Optional.of(value), x));
       } else {
-        testTable.addCacheEntry(new CacheKey<>(key), new CacheValue<>(value,
-            CacheValue.OperationType.DELETED, x));
+        testTable.addCacheEntry(new CacheKey<>(key),
+            new CacheValue<>(Optional.absent(),
+                x));
       }
     }
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java
index 836c3ac4f0c43..246febb75f5c6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java
@@ -21,6 +21,7 @@
 
 import java.util.concurrent.CompletableFuture;
 
+import com.google.common.base.Optional;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -47,8 +48,7 @@ public void testFullTableCache() {
 
     for (int i = 0; i< 10; i++) {
       tableCache.put(new CacheKey<>(Integer.toString(i)),
-          new CacheValue<>(Integer.toString(i),
-              CacheValue.OperationType.CREATED, i));
+          new CacheValue<>(Optional.of(Integer.toString(i)), i));
     }
 
 
@@ -116,8 +116,7 @@ private int writeToCache(int count, int startVal, long sleep)
     int counter = 0;
     while (counter < count){
       tableCache.put(new CacheKey<>(Integer.toString(startVal)),
-          new CacheValue<>(Integer.toString(startVal),
-              CacheValue.OperationType.CREATED, startVal));
+          new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
       startVal++;
       counter++;
       Thread.sleep(sleep);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java
index
0f5fb94259088..f70665960e2e2 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestPartialTableCache.java @@ -21,6 +21,7 @@ import java.util.concurrent.CompletableFuture; +import com.google.common.base.Optional; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Before; @@ -44,8 +45,7 @@ public void testPartialTableCache() { for (int i = 0; i< 10; i++) { tableCache.put(new CacheKey<>(Integer.toString(i)), - new CacheValue<>(Integer.toString(i), - CacheValue.OperationType.CREATED, i)); + new CacheValue<>(Optional.of(Integer.toString(i)), i)); } @@ -132,8 +132,7 @@ private int writeToCache(int count, int startVal, long sleep) int counter = 1; while (counter <= count){ tableCache.put(new CacheKey<>(Integer.toString(startVal)), - new CacheValue<>(Integer.toString(startVal), - CacheValue.OperationType.CREATED, startVal)); + new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal)); startVal++; counter++; Thread.sleep(sleep); From a08d166e73e16c48bd0c80d1ff79357d6c0e8078 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 15 May 2019 11:42:59 -0700 Subject: [PATCH 5/8] fix java doc --- .../src/main/java/org/apache/hadoop/utils/db/Table.java | 4 ---- .../src/main/java/org/apache/hadoop/utils/db/TypedTable.java | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java index 34b7fb7d060a7..905a68b064624 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java @@ -62,10 +62,6 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value) * Returns the value mapped to the given key in byte array or returns null * if the key is not found. * - * Caller's of this method should use synchronization mechanism, when - * accessing, as underlying we first check cache, if it does not exist in - * the cache use RocksDB table. - * * @param key metadata key * @return value in byte array or null if the key is not found. * @throws IOException on Failure diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java index 6efdd69205495..ea8c885d8a2a9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java @@ -99,8 +99,8 @@ public boolean isEmpty() throws IOException { * if the key is not found. * * Caller's of this method should use synchronization mechanism, when - * accessing, as underlying we first check cache, if it does not exist in - * the cache use RocksDB table. + * accessing. First it will check from cache, if it has entry return the + * value, otherwise get from the RocksDB table. 
* * @param key metadata key * @return VALUE From 7c5c481a075a2733fff7f68286cdf9864b957188 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 15 May 2019 14:23:35 -0700 Subject: [PATCH 6/8] fix review comments and remove cache type --- .../org/apache/hadoop/utils/db/DBStore.java | 5 +- .../org/apache/hadoop/utils/db/RDBStore.java | 6 +- .../apache/hadoop/utils/db/TypedTable.java | 36 ++--- .../hadoop/utils/db/cache/FullTableCache.java | 67 ---------- .../utils/db/cache/PartialTableCache.java | 9 +- .../hadoop/utils/db/cache/TableCache.java | 11 -- .../utils/db/TestTypedRDBTableStore.java | 18 +-- .../utils/db/cache/TestFullTableCache.java | 126 ------------------ .../scm/metadata/SCMMetadataStoreRDBImpl.java | 10 +- .../ozone/om/OmMetadataManagerImpl.java | 30 ++--- .../impl/ContainerDBServiceProviderImpl.java | 6 +- 11 files changed, 35 insertions(+), 289 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java index d1425e2b8b37d..9e0c4a4b42c26 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.utils.db.cache.TableCache; /** * The DBStore interface provides the ability to create Tables, which store @@ -52,13 +51,11 @@ public interface DBStore extends AutoCloseable { * @param name - Name of the TableStore to get * @param keyType * @param valueType - * @param cachetype - Type of cache need to be used for this table. * @return - TableStore. * @throws IOException on Failure */ Table getTable(String name, - Class keyType, Class valueType, - TableCache.CACHETYPE cachetype) throws IOException; + Class keyType, Class valueType) throws IOException; /** * Lists the Known list of Tables in a DB. 
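
With the cache type knob removed, every TypedTable wires up a PartialTableCache
unconditionally, and a read consults that cache before touching RocksDB. Below
is a minimal sketch of the resulting lookup order. SimpleTable and its
String-keyed maps are illustrative stand-ins rather than the patch's classes;
only the cache-first flow and the Optional tombstone semantics mirror
TypedTable#get.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import com.google.common.base.Optional;

    class SimpleTable {
      // Mutations land here first, recorded as present (put) or absent
      // (delete) values.
      private final Map<String, Optional<String>> cache =
          new ConcurrentHashMap<>();
      // Stand-in for the persisted RocksDB column family.
      private final Map<String, String> rocksDb = new ConcurrentHashMap<>();

      String get(String key) {
        Optional<String> cached = cache.get(key);
        if (cached != null) {
          // Cache hit. An absent value records a not-yet-flushed delete,
          // so orNull() correctly reports the key as gone.
          return cached.orNull();
        }
        // Cache miss: fall back to the underlying table.
        return rocksDb.get(key);
      }
    }

Returning orNull() on a hit is what lets a delete become visible to readers
before the batch carrying it reaches RocksDB.
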
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
index ba99d5ab3da1f..5bb0fa41399ba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
@@ -38,7 +38,6 @@
 import org.apache.hadoop.utils.RocksDBStoreMBean;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.utils.db.cache.TableCache;
 import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
@@ -259,10 +258,9 @@ public Table<byte[], byte[]> getTable(String name) throws IOException {
 
   @Override
   public <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType,
-      TableCache.CACHETYPE cachetype) throws IOException {
+      Class<KEY> keyType, Class<VALUE> valueType) throws IOException {
     return new TypedTable(getTable(name), codecRegistry, keyType,
-        valueType, cachetype);
+        valueType);
   }
 
   @Override
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
index ea8c885d8a2a9..6de65090a92fa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
@@ -23,7 +23,6 @@
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;
-import org.apache.hadoop.utils.db.cache.FullTableCache;
 import org.apache.hadoop.utils.db.cache.PartialTableCache;
 import org.apache.hadoop.utils.db.cache.TableCache;
 
@@ -48,30 +47,16 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
 
   private final TableCache<CacheKey<KEY>, CacheValue<VALUE>> cache;
 
-  public TypedTable(
-      Table<byte[], byte[]> rawTable,
-      CodecRegistry codecRegistry, Class<KEY> keyType,
-      Class<VALUE> valueType) {
-    this(rawTable, codecRegistry, keyType, valueType,
-        null);
-  }
-
   public TypedTable(
       Table<byte[], byte[]> rawTable,
       CodecRegistry codecRegistry, Class<KEY> keyType,
-      Class<VALUE> valueType, TableCache.CACHETYPE cachetype) {
+      Class<VALUE> valueType) {
     this.rawTable = rawTable;
     this.codecRegistry = codecRegistry;
     this.keyType = keyType;
     this.valueType = valueType;
-    if (cachetype == TableCache.CACHETYPE.FULLCACHE) {
-      cache = new FullTableCache<>();
-    } else if (cachetype == TableCache.CACHETYPE.PARTIALCACHE) {
-      cache = new PartialTableCache<>();
-    } else {
-      cache = null;
-    }
+    cache = new PartialTableCache<>();
   }
 
   @Override
@@ -110,16 +95,15 @@ public boolean isEmpty() throws IOException {
   public VALUE get(KEY key) throws IOException {
     // Here the metadata lock will guarantee that cache is not updated for same
     // key during get key.
-    if (cache != null) {
-      CacheValue< VALUE > cacheValue = cache.get(new CacheKey<>(key));
-      if (cacheValue != null) {
-        // We have a value in cache, return the value.
-        return cacheValue.getValue();
+    CacheValue< VALUE > cacheValue = cache.get(new CacheKey<>(key));
+    if (cacheValue == null) {
+      // If no cache for the table or if it does not exist in cache get from
+      // RocksDB table.
+      return getFromTable(key);
+    } else {
+      // We have a value in cache, return the value.
+      return cacheValue.getValue();
     }
-    // If no cache for the table or if it does not exist in cache get from
-    // RocksDB table.
- return getFromTable(key); } private VALUE getFromTable(KEY key) throws IOException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java deleted file mode 100644 index 5b11496f1b27c..0000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/FullTableCache.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.utils.db.cache; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - -import java.util.concurrent.ConcurrentHashMap; - -/** - * This is the full table cache, where it uses concurrentHashMap internally, - * and does not do any evict or cleanup. This full table cache need to be - * used by tables where we want to cache the entire table with out any - * cleanup to the cache - * @param - * @param - */ - -@Private -@Evolving -public class FullTableCache - implements TableCache { - - private final ConcurrentHashMap cache; - - public FullTableCache() { - cache = new ConcurrentHashMap<>(); - } - - @Override - public CACHEVALUE get(CACHEKEY cacheKey) { - return cache.get(cacheKey); - } - - - @Override - public void put(CACHEKEY cacheKey, CACHEVALUE value) { - cache.put(cacheKey, value); - } - - @Override - public void cleanup(long epoch) { - // Do nothing - } - - @Override - public int size() { - return cache.size(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index 983894f32fd6a..0bf46524178e9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -28,17 +28,14 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; - - /** - * This is used for the tables where we don't want to cache entire table in - * in-memory. + * Cache implementation for the table, this cache is partial cache, this will + * be cleaned up, after entries are flushed to DB. 
 */
@Private
@Evolving
-public class PartialTableCache<CACHEKEY, CACHEVALUE>
-    implements TableCache<CACHEKEY, CACHEVALUE> {
+public class PartialTableCache<CACHEKEY extends CacheKey,
+    CACHEVALUE extends CacheValue> implements TableCache<CACHEKEY, CACHEVALUE> {
 
   private final ConcurrentHashMap<CACHEKEY, CACHEVALUE> cache;
   private final TreeSet<EpochEntry<CACHEKEY>> epochEntries;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
index cb0b9aa05a81e..70e0b33e92974 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCache.java
@@ -60,15 +60,4 @@ public interface TableCache<CACHEKEY extends CacheKey,
[...]
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestTypedRDBTableStore.java
[...]
   private TypedTable<String, String> createTypedTable(String name)
[...]
         String.class, String.class);
   }
 
-  private TypedTable<String, String> createTypedTableWithCache(String name,
-      TableCache.CACHETYPE cachetype)
-      throws IOException {
-    return new TypedTable(
-        rdbStore.getTable(name),
-        codecRegistry,
-        String.class, String.class, cachetype);
-  }
-
   @Test
   public void delete() throws Exception {
     List<String> deletedKeys = new LinkedList<>();
@@ -254,8 +244,8 @@ public void forEachAndIterator() throws Exception {
   @Test
   public void testTypedTableWithCache() throws Exception {
     int iterCount = 10;
-    try (Table<String, String> testTable = createTypedTableWithCache(
-        "Seven", TableCache.CACHETYPE.FULLCACHE)) {
+    try (Table<String, String> testTable = createTypedTable(
+        "Seven")) {
 
       for (int x = 0; x < iterCount; x++) {
         String key = Integer.toString(x);
@@ -279,8 +269,8 @@ public void testTypedTableWithCache() throws Exception {
   public void testTypedTableWithCacheWithFewDeletedOperationType()
       throws Exception {
     int iterCount = 10;
-    try (Table<String, String> testTable = createTypedTableWithCache(
-        "Seven", TableCache.CACHETYPE.PARTIALCACHE)) {
+    try (Table<String, String> testTable = createTypedTable(
+        "Seven")) {
 
       for (int x = 0; x < iterCount; x++) {
         String key = Integer.toString(x);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java
deleted file mode 100644
index 246febb75f5c6..0000000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/cache/TestFullTableCache.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db.cache;
-
-import java.util.concurrent.CompletableFuture;
-
-import com.google.common.base.Optional;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.fail;
-
-/**
- * Class tests FullTable cache.
- */ -public class TestFullTableCache { - - private TableCache, CacheValue> tableCache; - - @Before - public void create() { - tableCache = new FullTableCache<>(); - } - - - @Test - public void testFullTableCache() { - tableCache = new FullTableCache<>(); - - - for (int i = 0; i< 10; i++) { - tableCache.put(new CacheKey<>(Integer.toString(i)), - new CacheValue<>(Optional.of(Integer.toString(i)), i)); - } - - - for (int i=0; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); - } - - // On a full table cache if some one calls cleanup it is a no-op. - tableCache.cleanup(10); - - for (int i=0; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); - } - } - - @Test - public void testFullTableCacheParallel() throws Exception { - - int totalCount = 0; - CompletableFuture future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 0, 0); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - int value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - - future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 10, 100); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - - for (int i=0; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); - } - - value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - Assert.assertEquals(totalCount, tableCache.size()); - - for (int i=0; i < 20; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getValue()); - } - } - - private int writeToCache(int count, int startVal, long sleep) - throws InterruptedException { - int counter = 0; - while (counter < count){ - tableCache.put(new CacheKey<>(Integer.toString(startVal)), - new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal)); - startVal++; - counter++; - Thread.sleep(sleep); - } - return count; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java index 25be9c93929a7..fde754c9acdd4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.utils.db.TableIterator; -import org.apache.hadoop.utils.db.cache.TableCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,18 +104,15 @@ public void start(OzoneConfiguration config) .build(); deletedBlocksTable = this.store.getTable(DELETED_BLOCKS_TABLE, - Long.class, DeletedBlocksTransaction.class, - TableCache.CACHETYPE.NOCACHE); + Long.class, DeletedBlocksTransaction.class); checkTableStatus(deletedBlocksTable, DELETED_BLOCKS_TABLE); validCertsTable = this.store.getTable(VALID_CERTS_TABLE, - BigInteger.class, X509Certificate.class, - TableCache.CACHETYPE.NOCACHE); + BigInteger.class, X509Certificate.class); checkTableStatus(validCertsTable, VALID_CERTS_TABLE); revokedCertsTable = 
this.store.getTable(REVOKED_CERTS_TABLE, - BigInteger.class, X509Certificate.class, - TableCache.CACHETYPE.NOCACHE); + BigInteger.class, X509Certificate.class); checkTableStatus(revokedCertsTable, REVOKED_CERTS_TABLE); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index cf35e5b78a1fd..6987927b173d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -60,7 +60,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import org.apache.hadoop.utils.db.cache.TableCache; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -247,50 +246,41 @@ protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { */ protected void initializeOmTables() throws IOException { userTable = - this.store.getTable(USER_TABLE, String.class, VolumeList.class, - TableCache.CACHETYPE.PARTIALCACHE); + this.store.getTable(USER_TABLE, String.class, VolumeList.class); checkTableStatus(userTable, USER_TABLE); volumeTable = - this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class, - TableCache.CACHETYPE.FULLCACHE); + this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class); checkTableStatus(volumeTable, VOLUME_TABLE); bucketTable = - this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class, - TableCache.CACHETYPE.FULLCACHE); + this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class); checkTableStatus(bucketTable, BUCKET_TABLE); - keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class, - TableCache.CACHETYPE.PARTIALCACHE); + keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class); checkTableStatus(keyTable, KEY_TABLE); deletedTable = - this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class, - TableCache.CACHETYPE.NOCACHE); + this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class); checkTableStatus(deletedTable, DELETED_TABLE); openKeyTable = - this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class, - TableCache.CACHETYPE.PARTIALCACHE); + this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class); checkTableStatus(openKeyTable, OPEN_KEY_TABLE); - s3Table = this.store.getTable(S3_TABLE, String.class, String.class, - TableCache.CACHETYPE.PARTIALCACHE); + s3Table = this.store.getTable(S3_TABLE, String.class, String.class); checkTableStatus(s3Table, S3_TABLE); multipartInfoTable = this.store.getTable(MULTIPARTINFO_TABLE, - String.class, OmMultipartKeyInfo.class, - TableCache.CACHETYPE.PARTIALCACHE); + String.class, OmMultipartKeyInfo.class); checkTableStatus(multipartInfoTable, MULTIPARTINFO_TABLE); dTokenTable = this.store.getTable(DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, Long.class, - TableCache.CACHETYPE.PARTIALCACHE); + OzoneTokenIdentifier.class, Long.class); checkTableStatus(dTokenTable, DELEGATION_TOKEN_TABLE); s3SecretTable = this.store.getTable(S3_SECRET_TABLE, String.class, - S3SecretValue.class, TableCache.CACHETYPE.PARTIALCACHE); + S3SecretValue.class); checkTableStatus(s3SecretTable, S3_SECRET_TABLE); } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index 60e309c0ca133..3a20e82c9dc67 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -39,7 +39,6 @@ import org.apache.hadoop.utils.db.Table; import org.apache.hadoop.utils.db.Table.KeyValue; import org.apache.hadoop.utils.db.TableIterator; -import org.apache.hadoop.utils.db.cache.TableCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,8 +64,7 @@ public class ContainerDBServiceProviderImpl public ContainerDBServiceProviderImpl(DBStore dbStore) { try { this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class, - TableCache.CACHETYPE.NOCACHE); + ContainerKeyPrefix.class, Integer.class); } catch (IOException e) { LOG.error("Unable to create Container Key Table. " + e); } @@ -87,7 +85,7 @@ public void initNewContainerDB(Map File oldDBLocation = containerDbStore.getDbLocation(); containerDbStore = ReconContainerDBProvider.getNewDBStore(configuration); containerKeyTable = containerDbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class, TableCache.CACHETYPE.NOCACHE); + ContainerKeyPrefix.class, Integer.class); if (oldDBLocation.exists()) { LOG.info("Cleaning up old Recon Container DB at {}.", From a3ccf358d75f04c44abac8f0acc1cebbf204fde0 Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Fri, 17 May 2019 10:08:50 -0700 Subject: [PATCH 7/8] fix review comments. --- .../org/apache/hadoop/utils/db/cache/PartialTableCache.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index 0bf46524178e9..6404cf24b21aa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -59,8 +59,7 @@ public CACHEVALUE get(CACHEKEY cachekey) { @Override public void put(CACHEKEY cacheKey, CACHEVALUE value) { cache.put(cacheKey, value); - CacheValue cacheValue = cache.get(cacheKey); - epochEntries.add(new EpochEntry<>(cacheValue.getEpoch(), cacheKey)); + epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey)); } @Override From 7349675d3c7645bd1abbad61ffb9594c0f3a0eda Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Fri, 17 May 2019 15:27:58 -0700 Subject: [PATCH 8/8] fix review comments. 
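
This patch swaps the cleanup executor's default thread factory for a named
daemon factory, so a pending eviction can never hold up JVM shutdown and the
thread is easy to spot in a dump. A condensed sketch of the eviction scheme
that thread runs follows; EpochSketchCache and its String values are
illustrative, and a TreeMap keyed by epoch stands in for the patch's TreeSet
of EpochEntry objects, under the assumption that epochs are unique (which
holds when they are transaction log indexes).

    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    class EpochSketchCache {
      private final Map<String, String> cache = new ConcurrentHashMap<>();
      // epoch -> key, ordered so that flushed entries form a prefix.
      private final TreeMap<Long, String> epochEntries = new TreeMap<>();
      private final ExecutorService cleaner;

      EpochSketchCache() {
        // Named daemon thread, mirroring the change below.
        ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("EpochSketchCache Cleanup Thread - %d").build();
        // Single thread, so at most one cleanup runs at a time.
        cleaner = Executors.newSingleThreadExecutor(factory);
      }

      void put(String key, String value, long epoch) {
        cache.put(key, value);
        synchronized (epochEntries) {
          epochEntries.put(epoch, key);
        }
      }

      void cleanup(long flushedEpoch) {
        cleaner.execute(() -> {
          synchronized (epochEntries) {
            // Entries with epoch <= flushedEpoch are already durable in
            // RocksDB, so drop them from the cache and the index.
            Map<Long, String> flushed =
                epochEntries.headMap(flushedEpoch, true);
            flushed.values().forEach(cache::remove);
            flushed.clear();
          }
        });
      }
    }
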
--- .../apache/hadoop/utils/db/cache/PartialTableCache.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java index 6404cf24b21aa..4d3711269a168 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/PartialTableCache.java @@ -24,7 +24,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -48,7 +50,10 @@ public PartialTableCache() { epochEntries = new TreeSet<>(); // Created a singleThreadExecutor, so one cleanup will be running at a // time. - executorService = Executors.newSingleThreadExecutor(); + ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("PartialTableCache Cleanup Thread - %d").build(); + executorService = Executors.newSingleThreadExecutor(build); + } @Override
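
Taken together, the series leaves Table with a simple cache lifecycle: add an
entry tagged with the transaction log index when a request is applied, serve
reads from the cache immediately, and evict once the flush for that index
completes. A hedged usage sketch against the API these patches introduce; the
table name, key names, and index value are made up, and store is assumed to
be a DBStore obtained elsewhere.

    // Apply a create and a delete at transaction index 100, then evict
    // both entries after that index has been flushed to RocksDB.
    Table<String, String> table = store.getTable("bucketTable",
        String.class, String.class);

    table.addCacheEntry(new CacheKey<>("/vol1/bucket1"),
        new CacheValue<>(Optional.of("bucketInfo"), 100));
    table.addCacheEntry(new CacheKey<>("/vol1/bucket2"),
        new CacheValue<>(Optional.absent(), 100));

    // Reads now see bucket1 and report bucket2 as deleted, even before
    // the flush. Once the double buffer has flushed index 100:
    table.cleanupCache(100);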