build: disable implicit fallthrough
Prevent switch case statements from falling through without an annotation
([[fallthrough]]) indicating that the fall-through is intended.

Existing intended cases were annotated.

Closes #14607
avikivity authored and tgrabiec committed Jul 10, 2023
1 parent d645e7a commit 0cabf4e
Showing 12 changed files with 77 additions and 3 deletions.
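
For context, the pattern the new warning enforces looks like the following. This is a minimal illustrative sketch, not code from the Scylla tree (the enum, function, and messages are made up): when compiling with -Wall -Werror -Wimplicit-fallthrough, a case that runs into the next one must end in a break/return or carry a [[fallthrough]]; annotation, otherwise the build fails.

#include <cstdio>

enum class mode { log_and_apply, apply, skip };

// Hypothetical example, compiled with -Wall -Werror -Wimplicit-fallthrough
// (the flag configure.py now adds for the whole tree).
void run(mode m) {
    switch (m) {
    case mode::log_and_apply:
        std::printf("logging\n");
        [[fallthrough]];    // intentional: continue into the apply case
    case mode::apply:
        std::printf("applying\n");
        break;              // dropping this break (or the annotation above) now fails the build
    case mode::skip:
        break;
    }
}

The changes below follow exactly this pattern: each pre-existing intentional fall-through gains a [[fallthrough]]; annotation, and configure.py adds the warning flag.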
1 change: 1 addition & 0 deletions compaction/time_window_compaction_strategy.cc
@@ -428,6 +428,7 @@ void time_window_compaction_strategy::update_estimated_compaction_by_tasks(time_
break;
case bucket_compaction_mode::major:
n++;
break;
default:
break;
}
1 change: 1 addition & 0 deletions configure.py
@@ -1415,6 +1415,7 @@ def find_headers(repodir, excluded_dirs):
warnings = [
'-Wall',
'-Werror',
'-Wimplicit-fallthrough',
'-Wno-mismatched-tags', # clang-only
'-Wno-tautological-compare',
'-Wno-c++11-narrowing',
7 changes: 7 additions & 0 deletions db/consistency_level.cc
@@ -86,6 +86,7 @@ size_t block_for_each_quorum(const locator::effective_replication_map& erm) {
size_t block_for(const locator::effective_replication_map& erm, consistency_level cl) {
switch (cl) {
case consistency_level::ONE:
[[fallthrough]];
case consistency_level::LOCAL_ONE:
return 1;
case consistency_level::ANY:
@@ -95,11 +96,13 @@ size_t block_for(const locator::effective_replication_map& erm, consistency_leve
case consistency_level::THREE:
return 3;
case consistency_level::QUORUM:
[[fallthrough]];
case consistency_level::SERIAL:
return quorum_for(erm);
case consistency_level::ALL:
return erm.get_replication_factor();
case consistency_level::LOCAL_QUORUM:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
return block_for_local_serial(erm);
case consistency_level::EACH_QUORUM:
@@ -216,6 +219,7 @@ void assure_sufficient_live_nodes(
break;
}
// Fallthough on purpose for SimpleStrategy
[[fallthrough]];
default:
size_t live = live_endpoints.size();
size_t pending = pending_endpoints.size();
@@ -373,6 +377,7 @@ is_sufficient_live_nodes(consistency_level cl,
return true;
}
}
[[fallthrough]];
// Fallthough on purpose for SimpleStrategy
default:
return live_endpoints.size() >= block_for(erm, cl);
@@ -393,6 +398,7 @@ void validate_for_read(consistency_level cl) {
void validate_for_write(consistency_level cl) {
switch (cl) {
case consistency_level::SERIAL:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
throw exceptions::invalid_request_exception("You must use conditional updates for serializable writes");
default:
@@ -404,6 +410,7 @@ void validate_for_write(consistency_level cl) {
void validate_for_cas_learn(consistency_level cl, const sstring& keyspace) {
switch (cl) {
case consistency_level::SERIAL:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
throw exceptions::invalid_request_exception(format("{} is not supported as conditional update commit consistency. Use ANY if you mean \"make sure it is accepted but I don't care how many replicas commit it for non-SERIAL reads\"", cl));
default:
1 change: 1 addition & 0 deletions schema/schema.cc
@@ -409,6 +409,7 @@ schema::schema(private_tag, const raw_schema& raw, std::optional<raw_view_info>
def._thrift_bits.is_on_all_components = true;
break;
}
[[fallthrough]];
default:
// Or any other column where "comparator" is not compound
def._thrift_bits.is_on_all_components = !thrift().has_compound_comparator();
1 change: 1 addition & 0 deletions service/raft/raft_group0_client.cc
@@ -273,6 +273,7 @@ future<group0_guard> raft_group0_client::start_operation(seastar::abort_source*

case group0_upgrade_state::recovery:
logger.warn("starting operation in RECOVERY mode (using old procedures)");
[[fallthrough]];
case group0_upgrade_state::use_pre_raft_procedures:
co_return group0_guard {
std::make_unique<group0_guard::impl>(
3 changes: 3 additions & 0 deletions service/storage_service.cc
@@ -385,7 +385,9 @@ future<> storage_service::topology_state_load(cdc::generation_service& cdc_gen_s
}
switch (*state) {
case topology::transition_state::commit_cdc_generation:
[[fallthrough]];
case topology::transition_state::publish_cdc_generation:
[[fallthrough]];
case topology::transition_state::write_both_read_old:
return read_new_t::no;
case topology::transition_state::write_both_read_new:
@@ -1322,6 +1324,7 @@ class topology_coordinator {
break;
case node_state::removing:
co_await remove_from_group0(node.id);
[[fallthrough]];
case node_state::decommissioning: {
topology_mutation_builder builder(node.guard.write_timestamp());
auto next_state = node.rs->state == node_state::decommissioning
9 changes: 9 additions & 0 deletions sstables/index_reader.hh
@@ -193,25 +193,29 @@ public:
_state = state::KEY_BYTES;
break;
}
[[fallthrough]];
case state::KEY_BYTES:
sstlog.trace("{}: pos {} state {} - size={}", fmt::ptr(this), current_pos(), state::KEY_BYTES, this->_u16);
if (this->read_bytes_contiguous(data, this->_u16, _key) != continuous_data_consumer::read_status::ready) {
_state = state::POSITION;
break;
}
[[fallthrough]];
case state::POSITION:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::POSITION);
if (read_vint_or_uint64(data) != continuous_data_consumer::read_status::ready) {
_state = state::PROMOTED_SIZE;
break;
}
[[fallthrough]];
case state::PROMOTED_SIZE:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::PROMOTED_SIZE);
_position = this->_u64;
if (read_vint_or_uint32(data) != continuous_data_consumer::read_status::ready) {
_state = state::PARTITION_HEADER_LENGTH_1;
break;
}
[[fallthrough]];
case state::PARTITION_HEADER_LENGTH_1: {
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::PARTITION_HEADER_LENGTH_1);
auto promoted_index_size_with_header = get_uint32();
@@ -230,24 +234,28 @@ public:
break;
}
}
[[fallthrough]];
case state::PARTITION_HEADER_LENGTH_2:
sstlog.trace("{}: pos {} state {} {}", fmt::ptr(this), current_pos(), state::PARTITION_HEADER_LENGTH_2, this->_u64);
_partition_header_length = this->_u64;
state_LOCAL_DELETION_TIME:
[[fallthrough]];
case state::LOCAL_DELETION_TIME:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::LOCAL_DELETION_TIME);
_deletion_time.emplace();
if (this->read_32(data) != continuous_data_consumer::read_status::ready) {
_state = state::MARKED_FOR_DELETE_AT;
break;
}
[[fallthrough]];
case state::MARKED_FOR_DELETE_AT:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::MARKED_FOR_DELETE_AT);
_deletion_time->local_deletion_time = this->_u32;
if (this->read_64(data) != continuous_data_consumer::read_status::ready) {
_state = state::NUM_PROMOTED_INDEX_BLOCKS;
break;
}
[[fallthrough]];
case state::NUM_PROMOTED_INDEX_BLOCKS:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::NUM_PROMOTED_INDEX_BLOCKS);
_deletion_time->marked_for_delete_at = this->_u64;
Expand All @@ -256,6 +264,7 @@ public:
break;
}
state_CONSUME_ENTRY:
[[fallthrough]];
case state::CONSUME_ENTRY: {
auto promoted_index_start = current_pos();
auto promoted_index_size = _promoted_index_end - promoted_index_start;
15 changes: 13 additions & 2 deletions sstables/mx/parsers.hh
@@ -123,6 +123,7 @@ public:
_state = state::CK_KIND;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_KIND:
kind = bound_kind_m{_primitive._u8};
if (kind == bound_kind_m::clustering) {
@@ -133,10 +134,12 @@ public:
_state = state::CK_SIZE;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_SIZE:
if (_primitive._u16 < _s.clustering_key_size()) {
ck_range.drop_back(_s.clustering_key_size() - _primitive._u16);
}
[[fallthrough]];
case state::CK_BLOCK:
ck_block_label:
if (no_more_ck_blocks()) {
@@ -152,8 +155,10 @@ public:
_state = state::CK_BLOCK_HEADER;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_BLOCK_HEADER:
ck_blocks_header = _primitive._u64;
[[fallthrough]];
case state::CK_BLOCK2:
ck_block2_label:
{
@@ -177,6 +182,7 @@ public:
return read_status::waiting;
}
}
[[fallthrough]];
case state::CK_BLOCK_END:
clustering_key_values.push_back(std::move(column_value));
move_to_next_ck_block();
@@ -254,32 +260,35 @@ public:
_start_pos = _clustering.get_and_reset();
_clustering.set_parsing_start_key(false);
_state = state::END;
// fall-through
[[fallthrough]];
case state::END:
if (_clustering.consume(data) == read_status::waiting) {
return read_status::waiting;
}
_end_pos = _clustering.get_and_reset();
_state = state::OFFSET;
// fall-through
[[fallthrough]];
case state::OFFSET:
if (_primitive.read_unsigned_vint(data) != read_status::ready) {
_state = state::WIDTH;
return read_status::waiting;
}
[[fallthrough]];
case state::WIDTH:
_offset = _primitive._u64;
if (_primitive.read_signed_vint(data) != read_status::ready) {
_state = state::END_OPEN_MARKER_FLAG;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_FLAG:
assert(_primitive._i64 + width_base > 0);
_width = (_primitive._i64 + width_base);
if (_primitive.read_8(data) != read_status::ready) {
_state = state::END_OPEN_MARKER_LOCAL_DELETION_TIME;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_LOCAL_DELETION_TIME:
if (_primitive._u8 == 0) {
_state = state::DONE;
@@ -290,12 +299,14 @@ public:
_state = state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_1;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_1:
_end_open_marker->local_deletion_time = _primitive._u32;
if (_primitive.read_64(data) != read_status::ready) {
_state = state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_2;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_2:
_end_open_marker->marked_for_delete_at = _primitive._u64;
_state = state::DONE;
6 changes: 6 additions & 0 deletions sstables/promoted_index_blocks_reader.hh
@@ -90,32 +90,38 @@ private:
ctx.state = state_k_l::START_NAME_BYTES;
return;
}
[[fallthrough]];
case state_k_l::START_NAME_BYTES:
if (this->read_bytes_contiguous(data, this->_u16, ctx.start) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::END_NAME_LENGTH;
return;
}
[[fallthrough]];
case state_k_l::END_NAME_LENGTH:
if (this->read_16(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::END_NAME_BYTES;
return;
}
[[fallthrough]];
case state_k_l::END_NAME_BYTES:
if (this->read_bytes_contiguous(data, this->_u16, ctx.end) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::OFFSET;
return;
}
[[fallthrough]];
case state_k_l::OFFSET:
if (this->read_64(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::WIDTH;
return;
}
[[fallthrough]];
case state_k_l::WIDTH:
ctx.offset = this->_u64;
if (this->read_64(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::ADD_BLOCK;
return;
}
[[fallthrough]];
case state_k_l::ADD_BLOCK:
ctx.width = this->_u64;
ctx.state = state_k_l::START_NAME_LENGTH;
2 changes: 1 addition & 1 deletion test/boost/continuous_data_consumer_test.cc
@@ -68,7 +68,7 @@ class test_consumer final : public data_consumer::continuous_data_consumer<test_
_state = 1;
break;
}
// fall-through
[[fallthrough]];
case 1:
check(_u64);
++_count;
20 changes: 20 additions & 0 deletions utils/murmur_hash.cc
@@ -106,16 +106,22 @@ uint64_t hash2_64(bytes_view key, uint64_t seed)
break;
case 7:
h64 ^= (uint64_t) key[length - rem + 6] << 48;
[[fallthrough]];
case 6:
h64 ^= (uint64_t) key[length - rem + 5] << 40;
[[fallthrough]];
case 5:
h64 ^= (uint64_t) key[length - rem + 4] << 32;
[[fallthrough]];
case 4:
h64 ^= (uint64_t) key[length - rem + 3] << 24;
[[fallthrough]];
case 3:
h64 ^= (uint64_t) key[length - rem + 2] << 16;
[[fallthrough]];
case 2:
h64 ^= (uint64_t) key[length - rem + 1] << 8;
[[fallthrough]];
case 1:
h64 ^= (uint64_t) key[length - rem];
h64 *= m64;
@@ -182,20 +188,34 @@ void hash3_x64_128(bytes_view key, uint64_t seed, std::array<uint64_t,2> &result
switch (length & 15)
{
case 15: k2 ^= ((uint64_t) key[14]) << 48;
[[fallthrough]];
case 14: k2 ^= ((uint64_t) key[13]) << 40;
[[fallthrough]];
case 13: k2 ^= ((uint64_t) key[12]) << 32;
[[fallthrough]];
case 12: k2 ^= ((uint64_t) key[11]) << 24;
[[fallthrough]];
case 11: k2 ^= ((uint64_t) key[10]) << 16;
[[fallthrough]];
case 10: k2 ^= ((uint64_t) key[9]) << 8;
[[fallthrough]];
case 9: k2 ^= ((uint64_t) key[8]) << 0;
k2 *= c2; k2 = rotl64(k2,33); k2 *= c1; h2 ^= k2;
[[fallthrough]];
case 8: k1 ^= ((uint64_t) key[7]) << 56;
[[fallthrough]];
case 7: k1 ^= ((uint64_t) key[6]) << 48;
[[fallthrough]];
case 6: k1 ^= ((uint64_t) key[5]) << 40;
[[fallthrough]];
case 5: k1 ^= ((uint64_t) key[4]) << 32;
[[fallthrough]];
case 4: k1 ^= ((uint64_t) key[3]) << 24;
[[fallthrough]];
case 3: k1 ^= ((uint64_t) key[2]) << 16;
[[fallthrough]];
case 2: k1 ^= ((uint64_t) key[1]) << 8;
[[fallthrough]];
case 1: k1 ^= ((uint64_t) key[0]);
k1 *= c1; k1 = rotl64(k1,31); k1 *= c2; h1 ^= k1;
};
