diff --git a/src/Common/RWLock.cpp b/src/Common/RWLock.cpp index 5a13bb83f29e..a3ba9523f6ce 100644 --- a/src/Common/RWLock.cpp +++ b/src/Common/RWLock.cpp @@ -177,6 +177,7 @@ RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id, const std::c /// Lock is free to acquire if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end()) { + /// Set `rdlock_owner` or `wrlock_owner` and make it the owner. (type == Read ? rdlock_owner : wrlock_owner) = it_group; /// SM2: nothrow grantOwnership(it_group); } @@ -341,13 +342,21 @@ void RWLockImpl::grantOwnershipToAllReaders() noexcept { if (rdlock_owner != readers_queue.end()) { + size_t num_new_owners = 0; + for (;;) { + if (!rdlock_owner->ownership) + ++num_new_owners; grantOwnership(rdlock_owner); if (std::next(rdlock_owner) == readers_queue.end()) break; ++rdlock_owner; } + + /// There can't be more than one reader group that is not an owner. + /// (Because we add a new reader group only if the last reader group is already an owner - see the `can_use_last_group` variable.) 
+ chassert(num_new_owners <= 1); } } diff --git a/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp index 7de3ced2d0df..16ba01d02c63 100644 --- a/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -37,7 +37,7 @@ namespace if (timepoint.length() < 5) timepoint.insert(0, 5 - timepoint.length(), ' '); std::lock_guard lock{mutex}; - std::cout << timepoint << " : " << event << std::endl; + //std::cout << timepoint << " : " << event << std::endl; events.emplace_back(std::move(event)); } diff --git a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py index 65f05d186ceb..c08f3c9c2426 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py @@ -237,7 +237,8 @@ def truncate_tables(): while time.time() < end_time: table_name = f"mydb.tbl{randint(1, num_nodes)}" node = nodes[randint(0, num_nodes - 1)] - # "TRUNCATE TABLE IF EXISTS" still can throw some errors (e.g. "WRITE locking attempt on node0 has timed out!") + # "TRUNCATE TABLE IF EXISTS" still can throw some errors + # (e.g. "WRITE locking attempt on node0 has timed out!" if the table engine is "Log"). # So we use query_and_get_answer_with_error() to ignore any errors. # `lock_acquire_timeout` is reduced because we don't wait our test to wait too long. node.query_and_get_answer_with_error(