Merge "Fix handling of schema alters and eviction in cache" from Tomasz
"
Fixes #5134: eviction concurrent with a preempted partition entry update after
  a memtable flush may allow stale data to be populated into the cache.

Fixes #5135: cache reads may miss some writes if a schema alter followed by a
  read happens concurrently with a preempted partition entry update.

Fixes #5127: a cache-populating read concurrent with a schema alter may use the
  wrong schema version to interpret sstable data.

Fixes #5128: reads of multi-row partitions concurrent with a memtable flush may
  fail or crash the node after a schema alter.
"

* tag 'fix-cache-issues-with-schema-alter-and-eviction-v2' of github.com:tgrabiec/scylla:
  tests: row_cache: Introduce test_alter_then_preempted_update_then_memtable_read
  tests: row_cache_stress_test: Verify all entries are evictable at the end
  tests: row_cache_stress_test: Exercise single-partition reads
  tests: row_cache_stress_test: Add periodic schema alters
  tests: memtable_snapshot_source: Allow changing the schema
  tests: simple_schema: Prepare for schema altering
  row_cache: Record upgraded schema in memtable entries during update
  memtable: Extract memtable_entry::upgrade_schema()
  row_cache, mvcc: Prevent locked snapshots from being evicted
  row_cache: Make evict() not use invalidate_unwrapped()
  mvcc: Introduce partition_snapshot::touch()
  row_cache, mvcc: Do not upgrade schema of entries which are being updated
  row_cache: Use the correct schema version to populate the partition entry
  delegating_reader: Optimize fill_buffer()
  row_cache, memtable: Use upgrade_schema()
  flat_mutation_reader: Introduce upgrade_schema()
avikivity committed Oct 7, 2019
2 parents f2f0f5e + 020a537 commit 8ed6f94
Showing 15 changed files with 313 additions and 89 deletions.
54 changes: 41 additions & 13 deletions cache_flat_mutation_reader.hh
@@ -61,6 +61,7 @@ class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
// - _last_row points at a direct predecessor of the next row which is going to be read.
// Used for populating continuity.
// - _population_range_starts_before_all_rows is set accordingly
// - _underlying is engaged and fast-forwarded
reading_from_underlying,

end_of_stream
@@ -99,7 +100,13 @@ class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
// forward progress is not guaranteed in case iterators are getting constantly invalidated.
bool _lower_bound_changed = false;

// Points to the underlying reader conforming to _schema,
// either to *_underlying_holder or _read_context->underlying().underlying().
flat_mutation_reader* _underlying = nullptr;
std::optional<flat_mutation_reader> _underlying_holder;

future<> do_fill_buffer(db::timeout_clock::time_point);
future<> ensure_underlying(db::timeout_clock::time_point);
void copy_from_cache_to_buffer();
future<> process_static_row(db::timeout_clock::time_point);
void move_to_end();
@@ -186,23 +193,22 @@ future<> cache_flat_mutation_reader::process_static_row(db::timeout_clock::time_
return make_ready_future<>();
} else {
_read_context->cache().on_row_miss();
-        return _read_context->get_next_fragment(timeout).then([this] (mutation_fragment_opt&& sr) {
-            if (sr) {
-                assert(sr->is_static_row());
-                maybe_add_to_cache(sr->as_static_row());
-                push_mutation_fragment(std::move(*sr));
-            }
-            maybe_set_static_row_continuous();
return ensure_underlying(timeout).then([this, timeout] {
return (*_underlying)(timeout).then([this] (mutation_fragment_opt&& sr) {
if (sr) {
assert(sr->is_static_row());
maybe_add_to_cache(sr->as_static_row());
push_mutation_fragment(std::move(*sr));
}
maybe_set_static_row_continuous();
});
});
}
}

inline
void cache_flat_mutation_reader::touch_partition() {
-    if (_snp->at_latest_version()) {
-        rows_entry& last_dummy = *_snp->version()->partition().clustered_rows().rbegin();
-        _snp->tracker()->touch(last_dummy);
-    }
_snp->touch();
}

inline
@@ -232,14 +238,36 @@ future<> cache_flat_mutation_reader::fill_buffer(db::timeout_clock::time_point t
});
}

inline
future<> cache_flat_mutation_reader::ensure_underlying(db::timeout_clock::time_point timeout) {
if (_underlying) {
return make_ready_future<>();
}
return _read_context->ensure_underlying(timeout).then([this, timeout] {
flat_mutation_reader& ctx_underlying = _read_context->underlying().underlying();
if (ctx_underlying.schema() != _schema) {
_underlying_holder = make_delegating_reader(ctx_underlying);
_underlying_holder->upgrade_schema(_schema);
_underlying = &*_underlying_holder;
} else {
_underlying = &ctx_underlying;
}
});
}

inline
future<> cache_flat_mutation_reader::do_fill_buffer(db::timeout_clock::time_point timeout) {
if (_state == state::move_to_underlying) {
if (!_underlying) {
return ensure_underlying(timeout).then([this, timeout] {
return do_fill_buffer(timeout);
});
}
_state = state::reading_from_underlying;
_population_range_starts_before_all_rows = _lower_bound.is_before_all_clustered_rows(*_schema);
auto end = _next_row_in_range ? position_in_partition(_next_row.position())
: position_in_partition(_upper_bound);
-    return _read_context->fast_forward_to(position_range{_lower_bound, std::move(end)}, timeout).then([this, timeout] {
return _underlying->fast_forward_to(position_range{_lower_bound, std::move(end)}, timeout).then([this, timeout] {
return read_from_underlying(timeout);
});
}
@@ -280,7 +308,7 @@ future<> cache_flat_mutation_reader::do_fill_buffer(db::timeout_clock::time_poin

inline
future<> cache_flat_mutation_reader::read_from_underlying(db::timeout_clock::time_point timeout) {
-    return consume_mutation_fragments_until(_read_context->underlying().underlying(),
return consume_mutation_fragments_until(*_underlying,
[this] { return _state != state::reading_from_underlying || is_buffer_full(); },
[this] (mutation_fragment mf) {
_read_context->cache().on_row_miss();
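The structural change in this file: the reader engages _underlying lazily, and when the read context's shared reader was created for a different schema version it is wrapped in a delegating reader upgraded to _schema, so everything downstream sees a single schema. A simplified, non-Seastar analog of ensure_underlying() (every type and helper below is an illustrative stand-in, not the real API):

#include <memory>
#include <optional>

struct schema {};
using schema_ptr = std::shared_ptr<const schema>;

struct reader {
    schema_ptr s;
    const schema_ptr& get_schema() const { return s; }
};

// Stand-in for make_delegating_reader() + upgrade_schema(): a wrapper that
// would forward fragments from `underlying`, converted to `target`'s schema.
reader make_upgrading_delegate(reader& /*underlying*/, schema_ptr target) {
    return reader{std::move(target)};
}

struct cache_reader {
    schema_ptr s;                  // schema this read was started with
    reader* underlying = nullptr;  // always conforms to s once engaged
    std::optional<reader> holder;  // owns the wrapper when one is needed

    void ensure_underlying(reader& shared) {
        if (underlying) {
            return;                // already engaged
        }
        if (shared.get_schema() != s) {
            holder.emplace(make_upgrading_delegate(shared, s));
            underlying = &*holder; // read through the upgrading wrapper
        } else {
            underlying = &shared;  // no version skew; use the shared reader
        }
    }
};

int main() {
    auto v1 = std::make_shared<const schema>();
    auto v2 = std::make_shared<const schema>();
    reader shared{v1};
    cache_reader cr{v2};
    cr.ensure_underlying(shared);
    return cr.underlying == &*cr.holder ? 0 : 1; // wrapper engaged since v1 != v2
}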
5 changes: 5 additions & 0 deletions flat_mutation_reader.cc
@@ -23,6 +23,7 @@
#include "mutation_reader.hh"
#include "seastar/util/reference_wrapper.hh"
#include "clustering_ranges_walker.hh"
#include "schema_upgrader.hh"
#include <algorithm>

#include <boost/range/adaptor/transformed.hpp>
@@ -916,6 +917,10 @@ flat_mutation_reader make_generating_reader(schema_ptr s, std::function<future<m
return make_flat_mutation_reader<generating_reader>(std::move(s), std::move(get_next_fragment));
}

void flat_mutation_reader::do_upgrade_schema(const schema_ptr& s) {
*this = transform(std::move(*this), schema_upgrader(s));
}

bool mutation_fragment_stream_validator::operator()(const dht::decorated_key& dk) {
// FIXME: Validate partition key monotonicity
// https://github.com/scylladb/scylla/issues/4804
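do_upgrade_schema() replaces the reader with a transform() over a schema_upgrader, which is why the header below can promise that repeated upgrade_schema() calls compose: each call layers another transform over the already-transformed stream. A standalone illustration of that layering (toy transform, not Scylla's):

#include <functional>
#include <iostream>
#include <string>
#include <utility>

struct reader {
    std::function<std::string()> next; // yields fragments, modeled as strings
};

// Toy transform(): wrap a reader so every fragment passes through f.
reader transform(reader r, std::function<std::string(std::string)> f) {
    return reader{[r = std::move(r), f = std::move(f)] { return f(r.next()); }};
}

int main() {
    reader r{[] { return std::string("row@v1"); }};
    r = transform(std::move(r), [](std::string s) { return s + "->v2"; });
    r = transform(std::move(r), [](std::string s) { return s + "->v3"; });
    std::cout << r.next() << '\n'; // row@v1->v2->v3: both upgrades applied, in order
}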
17 changes: 15 additions & 2 deletions flat_mutation_reader.hh
@@ -331,6 +331,7 @@ private:
flat_mutation_reader() = default;
explicit operator bool() const noexcept { return bool(_impl); }
friend class optimized_optional<flat_mutation_reader>;
void do_upgrade_schema(const schema_ptr&);
public:
// Documented in mutation_reader::forwarding in mutation_reader.hh.
class partition_range_forwarding_tag;
@@ -520,6 +521,14 @@ public:
void move_buffer_content_to(impl& other) {
_impl->move_buffer_content_to(other);
}

// Causes this reader to conform to s.
// Multiple calls of upgrade_schema() compose; the effects of prior calls on the stream are preserved.
void upgrade_schema(const schema_ptr& s) {
if (__builtin_expect(s != schema(), false)) {
do_upgrade_schema(s);
}
}
};

using flat_mutation_reader_opt = optimized_optional<flat_mutation_reader>;
@@ -622,8 +631,12 @@ class delegating_reader : public flat_mutation_reader::impl {
public:
delegating_reader(Underlying&& r) : impl(to_reference(r).schema()), _underlying(std::forward<Underlying>(r)) { }
virtual future<> fill_buffer(db::timeout_clock::time_point timeout) override {
-        return fill_buffer_from(to_reference(_underlying), timeout).then([this] (bool underlying_finished) {
-            _end_of_stream = underlying_finished;
if (is_buffer_full()) {
return make_ready_future<>();
}
return to_reference(_underlying).fill_buffer(timeout).then([this] {
_end_of_stream = to_reference(_underlying).is_end_of_stream();
to_reference(_underlying).move_buffer_content_to(*this);
});
}
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
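The delegating_reader::fill_buffer() rewrite above drops the fragment-by-fragment fill_buffer_from() helper in favor of letting the underlying reader fill its own buffer and then moving that buffer over wholesale, short-circuiting when the destination buffer is already full. A standalone sketch of the move-the-buffer idea, with toy types in place of the real reader API:

#include <cassert>
#include <deque>
#include <string>
#include <utility>

struct toy_reader {
    std::deque<std::string> buf;
    bool end_of_stream = false;
    int produced = 0;

    void fill_buffer() { // toy producer: four fragments, then end of stream
        if (produced < 4) {
            buf.push_back("frag" + std::to_string(produced++));
        } else {
            end_of_stream = true;
        }
    }
    void move_buffer_content_to(std::deque<std::string>& dst) {
        while (!buf.empty()) { // splice by move: no per-fragment copies
            dst.push_back(std::move(buf.front()));
            buf.pop_front();
        }
    }
};

struct delegating {
    toy_reader underlying;
    std::deque<std::string> buf;
    bool end_of_stream = false;

    bool is_buffer_full() const { return buf.size() >= 8; }

    void fill_buffer() {
        if (is_buffer_full()) {
            return;            // short-circuit: don't touch the underlying reader
        }
        underlying.fill_buffer();
        end_of_stream = underlying.end_of_stream;
        underlying.move_buffer_content_to(buf);
    }
};

int main() {
    delegating d;
    while (!d.end_of_stream) {
        d.fill_buffer();
    }
    assert(d.buf.size() == 4); // every fragment moved through, none copied
}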
32 changes: 14 additions & 18 deletions memtable.cc
@@ -23,7 +23,6 @@
#include "database.hh"
#include "frozen_mutation.hh"
#include "partition_snapshot_reader.hh"
#include "schema_upgrader.hh"
#include "partition_builder.hh"

void memtable::memtable_encoding_stats_collector::update_timestamp(api::timestamp_type ts) {
@@ -429,11 +428,8 @@ class scanning_reader final : public flat_mutation_reader::impl, private iterato
bool digest_requested = _slice.options.contains<query::partition_slice::option::with_digest>();
auto mpsr = make_partition_snapshot_flat_reader(snp_schema, std::move(key_and_snp->first), std::move(cr),
std::move(key_and_snp->second), digest_requested, region(), read_section(), mtbl(), streamed_mutation::forwarding::no);
-        if (snp_schema->version() != schema()->version()) {
-            _delegate = transform(std::move(mpsr), schema_upgrader(schema()));
-        } else {
-            _delegate = std::move(mpsr);
-        }
mpsr.upgrade_schema(schema());
_delegate = std::move(mpsr);
} else {
_end_of_stream = true;
}
@@ -588,11 +584,8 @@ class flush_reader final : public flat_mutation_reader::impl, private iterator_r
auto snp_schema = key_and_snp->second->schema();
auto mpsr = make_partition_snapshot_flat_reader<partition_snapshot_accounter>(snp_schema, std::move(key_and_snp->first), std::move(cr),
std::move(key_and_snp->second), false, region(), read_section(), mtbl(), streamed_mutation::forwarding::no, *snp_schema, _flushed_memory);
-        if (snp_schema->version() != schema()->version()) {
-            _partition_reader = transform(std::move(mpsr), schema_upgrader(schema()));
-        } else {
-            _partition_reader = std::move(mpsr);
-        }
mpsr.upgrade_schema(schema());
_partition_reader = std::move(mpsr);
}
}
public:
@@ -668,11 +661,8 @@ memtable::make_flat_reader(schema_ptr s,
bool digest_requested = slice.options.contains<query::partition_slice::option::with_digest>();
auto rd = make_partition_snapshot_flat_reader(snp_schema, std::move(dk), std::move(cr), std::move(snp), digest_requested,
*this, _read_section, shared_from_this(), fwd);
-        if (snp_schema->version() != s->version()) {
-            return transform(std::move(rd), schema_upgrader(s));
-        } else {
-            return rd;
-        }
rd.upgrade_schema(s);
return rd;
} else {
auto res = make_flat_mutation_reader<scanning_reader>(std::move(s), shared_from_this(), range, slice, pc, fwd_mr);
if (fwd == streamed_mutation::forwarding::yes) {
@@ -787,13 +777,19 @@ bool memtable::is_flushed() const {
return bool(_underlying);
}

void memtable_entry::upgrade_schema(const schema_ptr& s, mutation_cleaner& cleaner) {
if (_schema != s) {
partition().upgrade(_schema, s, cleaner, no_cache_tracker);
_schema = s;
}
}

void memtable::upgrade_entry(memtable_entry& e) {
if (e._schema != _schema) {
assert(!reclaiming_enabled());
with_allocator(allocator(), [this, &e] {
with_linearized_managed_bytes([&] {
-                e.partition().upgrade(e._schema, _schema, cleaner(), no_cache_tracker);
-                e._schema = _schema;
e.upgrade_schema(_schema, cleaner());
});
});
}
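upgrade_entry() now delegates the version check and row rewrite to memtable_entry::upgrade_schema(), but still runs it with the memtable's allocator active, because the entry's rows live in that region. A toy model of that contract (the allocator switch is simulated; this is not the real LSA API):

#include <cassert>
#include <memory>

struct schema { int version; };
using schema_ptr = std::shared_ptr<const schema>;

// Simulated region allocator: entry mutation is only legal while it is active.
struct region {
    bool allocator_active = false;
    template <typename Fn>
    void with_allocator(Fn&& fn) {
        allocator_active = true;
        fn();
        allocator_active = false;
    }
};

struct memtable_entry {
    schema_ptr s;
    void upgrade_schema(const schema_ptr& to, const region& reg) {
        assert(reg.allocator_active); // rows are region-allocated
        if (s != to) {
            // the real code rewrites the partition's rows for the new schema here
            s = to;
        }
    }
};

int main() {
    region reg;
    auto v1 = std::make_shared<const schema>(schema{1});
    auto v2 = std::make_shared<const schema>(schema{2});
    memtable_entry e{v1};
    reg.with_allocator([&] { e.upgrade_schema(v2, reg); });
    assert(e.s->version == 2);
}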
4 changes: 4 additions & 0 deletions memtable.hh
@@ -69,6 +69,10 @@ public:
schema_ptr& schema() { return _schema; }
partition_snapshot_ptr snapshot(memtable& mtbl);

// Makes the entry conform to the given schema.
// Must be called under an allocating section of the region which owns the entry.
void upgrade_schema(const schema_ptr&, mutation_cleaner&);

size_t external_memory_usage_without_rows() const {
return _key.key().external_memory_usage();
}
41 changes: 36 additions & 5 deletions partition_version.cc
@@ -172,6 +172,9 @@ tombstone partition_entry::partition_tombstone() const {

partition_snapshot::~partition_snapshot() {
with_allocator(region().allocator(), [this] {
if (_locked) {
touch();
}
if (_version && _version.is_unique_owner()) {
auto v = &*_version;
_version = {};
@@ -268,6 +271,7 @@ partition_entry::~partition_entry() {
return;
}
if (_snapshot) {
assert(!_snapshot->is_locked());
_snapshot->_version = std::move(_version);
_snapshot->_version.mark_as_unique_owner();
_snapshot->_entry = nullptr;
@@ -284,6 +288,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept {
}

if (_snapshot) {
assert(!_snapshot->is_locked());
_snapshot->_version = std::move(_version);
_snapshot->_version.mark_as_unique_owner();
_snapshot->_entry = nullptr;
@@ -311,6 +316,7 @@ stop_iteration partition_entry::clear_gently(cache_tracker* tracker) noexcept {
void partition_entry::set_version(partition_version* new_version)
{
if (_snapshot) {
assert(!_snapshot->is_locked());
_snapshot->_version = std::move(_version);
_snapshot->_entry = nullptr;
}
@@ -459,7 +465,6 @@ class partition_entry::rows_iterator final {

coroutine partition_entry::apply_to_incomplete(const schema& s,
partition_entry&& pe,
-    const schema& pe_schema,
mutation_cleaner& pe_cleaner,
logalloc::allocating_section& alloc,
logalloc::region& reg,
@@ -479,10 +484,6 @@ coroutine partition_entry::apply_to_incomplete(const schema& s,
// partitions where I saw 40% slow down.
const bool preemptible = s.clustering_key_size() > 0;

-    if (s.version() != pe_schema.version()) {
-        pe.upgrade(pe_schema.shared_from_this(), s.shared_from_this(), pe_cleaner, no_cache_tracker);
-    }

// When preemptible, later memtable reads could start using the snapshot before
// snapshot's writes are made visible in cache, which would cause them to miss those writes.
// So we cannot allow erasing when preemptible.
@@ -496,6 +497,7 @@ coroutine partition_entry::apply_to_incomplete(const schema& s,
prev_snp = read(reg, tracker.cleaner(), s.shared_from_this(), &tracker, phase - 1);
}
auto dst_snp = read(reg, tracker.cleaner(), s.shared_from_this(), &tracker, phase);
dst_snp->lock();

// Once we start updating the partition, we must keep all snapshots until the update completes,
// otherwise partial writes would be published. So the scope of snapshots must enclose the scope
@@ -570,6 +572,7 @@ coroutine partition_entry::apply_to_incomplete(const schema& s,
auto has_next = src_cur.erase_and_advance();
acc.unpin_memory(size);
if (!has_next) {
dst_snp->unlock();
return stop_iteration::yes;
}
} while (!preemptible || !need_preempt());
@@ -661,6 +664,18 @@ partition_snapshot::range_tombstones()
position_in_partition_view::after_all_clustered_rows());
}

void partition_snapshot::touch() noexcept {
// Eviction assumes that older versions are evicted before newer so only the latest snapshot
// can be touched.
if (_tracker && at_latest_version()) {
auto&& rows = version()->partition().clustered_rows();
assert(!rows.empty());
rows_entry& last_dummy = *rows.rbegin();
assert(last_dummy.is_last_dummy());
_tracker->touch(last_dummy);
}
}

std::ostream& operator<<(std::ostream& out, const partition_entry::printer& p) {
auto& e = p._partition_entry;
out << "{";
@@ -688,6 +703,7 @@ void partition_entry::evict(mutation_cleaner& cleaner) noexcept {
return;
}
if (_snapshot) {
assert(!_snapshot->is_locked());
_snapshot->_version = std::move(_version);
_snapshot->_version.mark_as_unique_owner();
_snapshot->_entry = nullptr;
@@ -707,3 +723,18 @@
}
}
}

void partition_snapshot::lock() noexcept {
// partition_entry::is_locked() assumes that if there is a locked snapshot,
// it can be found attached directly to it.
assert(at_latest_version());
_locked = true;
}

void partition_snapshot::unlock() noexcept {
// Locked snapshots must always be at the latest version; is_locked() assumes that.
// Also, touch() is only effective when this snapshot is latest.
assert(at_latest_version());
_locked = false;
touch(); // Make the entry evictable again in case it was fully unlinked by eviction attempt.
}
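Taken together: in apply_to_incomplete(), lock() is acquired on the destination snapshot before the first row is applied and unlock() runs only after the last, so the snapshot stays pinned across every preemption point and eviction can never free versions the update is still writing to; the trailing touch() in unlock() re-links the entry in case an eviction attempt unlinked it in the meantime. A standalone model of that pairing (illustrative names only):

#include <cassert>

struct snapshot {
    bool locked = false;
    bool touched = false;
    void lock() noexcept { locked = true; }
    void unlock() noexcept {
        locked = false;
        touched = true; // touch(): the entry becomes evictable (and hot) again
    }
};

// One preemptible step: apply up to `budget` rows, return true when finished.
// The snapshot stays locked across preemptions, as in apply_to_incomplete().
bool update_step(snapshot& snp, int& rows_left, int budget) {
    while (budget-- > 0) {
        if (--rows_left == 0) {
            snp.unlock(); // last row applied: update complete
            return true;
        }
    }
    return false;         // preempted mid-update; eviction must skip snp
}

int main() {
    snapshot snp;
    snp.lock();             // taken before the first row is applied
    int rows = 5;
    while (!update_step(snp, rows, 2)) {
        assert(snp.locked); // across yields, the lock keeps eviction away
    }
    assert(!snp.locked && snp.touched);
}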