
Commit

shared_ptr aliasing
cbi42 committed Sep 13, 2022
1 parent 3ba9070 commit af3b15e
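The "aliasing" in the title refers to std::shared_ptr's aliasing constructor, shared_ptr(const shared_ptr&lt;U&gt;&amp; owner, T* ptr): the new shared_ptr shares the control block (and thus the lifetime) of owner, while get() returns ptr. A minimal illustration with hypothetical stand-in types, not RocksDB code:

```cpp
#include <cassert>
#include <memory>

// Hypothetical stand-in for the cached object; not the RocksDB type.
struct Cache {
  int payload = 7;
};

int main() {
  auto owner = std::make_shared<Cache>();
  // Aliasing constructor: `alias` keeps owner's control block alive,
  // but points at a different object -- here a member of *owner.
  std::shared_ptr<int> alias(owner, &owner->payload);

  assert(alias.use_count() == owner.use_count());  // same control block
  assert(*alias == 7);                             // different pointee
  return 0;
}
```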
Showing 4 changed files with 20 additions and 21 deletions.
27 changes: 15 additions & 12 deletions db/memtable.cc
@@ -561,23 +561,23 @@ FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIteratorInternal(
   }
 
   // takes current cache
-  std::shared_ptr<std::shared_ptr<FragmentedRangeTombstoneListCache>> cache =
+  std::shared_ptr<FragmentedRangeTombstoneListCache> cache =
       std::atomic_load_explicit(cached_range_tombstone_.Access(),
                                 std::memory_order_relaxed);
   // construct fragmented tombstone list if necessary
-  if (!(*cache)->initialized.load(std::memory_order_acquire)) {
-    (*cache)->reader_mutex.lock();
-    if (!(*cache)->tombstones) {
+  if (!cache->initialized.load(std::memory_order_acquire)) {
+    cache->reader_mutex.lock();
+    if (!cache->tombstones) {
       auto* unfragmented_iter =
           new MemTableIterator(*this, read_options, nullptr /* arena */,
                                true /* use_range_del_table */);
-      (*cache)->tombstones = std::make_unique<FragmentedRangeTombstoneList>(
+      cache->tombstones = std::make_unique<FragmentedRangeTombstoneList>(
           FragmentedRangeTombstoneList(
               std::unique_ptr<InternalIterator>(unfragmented_iter),
               comparator_.comparator));
-      (*cache)->initialized.store(true, std::memory_order_release);
+      cache->initialized.store(true, std::memory_order_release);
     }
-    (*cache)->reader_mutex.unlock();
+    cache->reader_mutex.unlock();
   }
 
   return new FragmentedRangeTombstoneIterator(cache, comparator_.comparator,
@@ -832,21 +832,24 @@ Status MemTable::Add(SequenceNumber s, ValueType type,
   }
   if (type == kTypeRangeDeletion) {
     auto new_cache = std::make_shared<FragmentedRangeTombstoneListCache>();
-    auto size = cached_range_tombstone_.Size();
+    size_t size = cached_range_tombstone_.Size();
     if (allow_concurrent) {
       range_del_mutex_.lock();
     }
     for (size_t i = 0; i < size; ++i) {
-      auto cache = cached_range_tombstone_.AccessAtCore(i);
+      std::shared_ptr<FragmentedRangeTombstoneListCache>* local_cache_ref_ptr =
+          cached_range_tombstone_.AccessAtCore(i);
+      auto new_local_cache_ref = std::make_shared<
+          const std::shared_ptr<FragmentedRangeTombstoneListCache>>(new_cache);
       // It is okay for some reader to load old cache during invalidation as
       // the new sequence number is not published yet.
+      // Each core will have a shared_ptr to a shared_ptr to the cached
+      // fragmented range tombstones, so that ref count is maintained locally
+      // per-core using the per-core shared_ptr.
       std::atomic_store_explicit(
-          cache,
-          std::make_shared<std::shared_ptr<FragmentedRangeTombstoneListCache>>(
-              new_cache),
+          local_cache_ref_ptr,
+          std::shared_ptr<FragmentedRangeTombstoneListCache>(
+              new_local_cache_ref, new_cache.get()),
           std::memory_order_relaxed);
     }
     if (allow_concurrent) {
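In the write path above, each per-core slot is refreshed with an aliased shared_ptr: ownership follows new_local_cache_ref (a shared_ptr to a const shared_ptr of the cache), while the stored pointer is the raw cache pointer, so readers copying the slot bump a per-core reference count rather than new_cache's own. A rough sketch of that store/load pair, with a plain std::vector standing in for RocksDB's CoreLocalArray and a stub cache type:

```cpp
#include <atomic>
#include <cstddef>
#include <memory>
#include <vector>

// Stub standing in for FragmentedRangeTombstoneListCache; members elided.
struct CacheStub {};

using CacheRef = std::shared_ptr<CacheStub>;

// Writer side: invalidate every per-core slot with an aliased shared_ptr.
void InvalidateAll(std::vector<CacheRef>& per_core_slots,
                   const std::shared_ptr<CacheStub>& new_cache) {
  for (CacheRef& slot : per_core_slots) {
    // Per-core ownership object holding one reference to the shared cache.
    auto local_ref =
        std::make_shared<const std::shared_ptr<CacheStub>>(new_cache);
    // Aliased value: shares local_ref's control block, points at the cache.
    std::atomic_store_explicit(&slot, CacheRef(local_ref, new_cache.get()),
                               std::memory_order_relaxed);
  }
}

// Reader side: copying the slot increments the per-core control block only.
CacheRef LoadForCore(const std::vector<CacheRef>& per_core_slots,
                     std::size_t core) {
  return std::atomic_load_explicit(&per_core_slots[core],
                                   std::memory_order_relaxed);
}
```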
3 changes: 1 addition & 2 deletions db/memtable.h
@@ -538,8 +538,7 @@ class MemTable {
 
   // makes sure there is a single range tombstone writer to invalidate cache
   std::mutex range_del_mutex_;
-  CoreLocalArray<
-      std::shared_ptr<std::shared_ptr<FragmentedRangeTombstoneListCache>>>
+  CoreLocalArray<std::shared_ptr<FragmentedRangeTombstoneListCache>>
       cached_range_tombstone_;
 
  private:
5 changes: 2 additions & 3 deletions db/range_tombstone_fragmenter.cc
@@ -252,16 +252,15 @@ FragmentedRangeTombstoneIterator::FragmentedRangeTombstoneIterator(
 }
 
 FragmentedRangeTombstoneIterator::FragmentedRangeTombstoneIterator(
-    const std::shared_ptr<std::shared_ptr<FragmentedRangeTombstoneListCache>>&
-        tombstones_cache,
+    const std::shared_ptr<FragmentedRangeTombstoneListCache>& tombstones_cache,
     const InternalKeyComparator& icmp, SequenceNumber _upper_bound,
     SequenceNumber _lower_bound)
     : tombstone_start_cmp_(icmp.user_comparator()),
       tombstone_end_cmp_(icmp.user_comparator()),
       icmp_(&icmp),
       ucmp_(icmp.user_comparator()),
       tombstones_cache_ref_(tombstones_cache),
-      tombstones_((*tombstones_cache_ref_)->tombstones.get()),
+      tombstones_(tombstones_cache_ref_->tombstones.get()),
       upper_bound_(_upper_bound),
       lower_bound_(_lower_bound) {
   assert(tombstones_ != nullptr);
6 changes: 2 additions & 4 deletions db/range_tombstone_fragmenter.h
@@ -123,8 +123,7 @@ class FragmentedRangeTombstoneIterator : public InternalIterator {
       const InternalKeyComparator& icmp, SequenceNumber upper_bound,
       SequenceNumber lower_bound = 0);
   FragmentedRangeTombstoneIterator(
-      const std::shared_ptr<std::shared_ptr<FragmentedRangeTombstoneListCache>>&
-          tombstones,
+      const std::shared_ptr<FragmentedRangeTombstoneListCache>& tombstones,
       const InternalKeyComparator& icmp, SequenceNumber upper_bound,
       SequenceNumber lower_bound = 0);
 
@@ -274,8 +273,7 @@ class FragmentedRangeTombstoneIterator : public InternalIterator {
   const InternalKeyComparator* icmp_;
   const Comparator* ucmp_;
   std::shared_ptr<const FragmentedRangeTombstoneList> tombstones_ref_;
-  std::shared_ptr<std::shared_ptr<FragmentedRangeTombstoneListCache>>
-      tombstones_cache_ref_;
+  std::shared_ptr<FragmentedRangeTombstoneListCache> tombstones_cache_ref_;
   const FragmentedRangeTombstoneList* tombstones_;
   SequenceNumber upper_bound_;
   SequenceNumber lower_bound_;
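On the read side, FragmentedRangeTombstoneIterator now holds the aliased shared_ptr directly (tombstones_cache_ref_ above), so an iterator keeps the per-core wrapper, and through it the cache and its fragmented tombstone list, alive even after a writer overwrites the slot. A small, hypothetical demonstration of that lifetime property, again with a stub type rather than the RocksDB class:

```cpp
#include <cassert>
#include <memory>

// Hypothetical stand-in for FragmentedRangeTombstoneListCache.
struct CacheStub {
  int tombstones = 42;
};

int main() {
  auto cache = std::make_shared<CacheStub>();
  auto per_core = std::make_shared<const std::shared_ptr<CacheStub>>(cache);
  // Aliased copy, as an iterator would hold it.
  std::shared_ptr<CacheStub> iter_ref(per_core, cache.get());

  // A writer overwrites the slot and drops its own references...
  per_core.reset();
  cache.reset();

  // ...but the iterator's copy still owns the wrapper, which owns the cache.
  assert(iter_ref->tombstones == 42);
  return 0;
}
```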
