Skip to content

Commit 9159b89

Browse files
committed
MDEV-22871: Clean up hash_table_t
HASH_TABLE_SYNC_MUTEX was kind-of used for the adaptive hash index,
even though that hash table is already protected by btr_search_latches[].

HASH_TABLE_SYNC_RWLOCK was only being used for buf_pool.page_hash.
It is cleaner to decouple that synchronization from hash_table_t,
and move it to the actual user.

buf_pool_t::page_hash_latches[]: Synchronization for buf_pool.page_hash.

LATCH_ID_HASH_TABLE_MUTEX: Remove.

hash_table_t::sync_obj, hash_table_t::n_sync_obj: Remove.

hash_table_t::type, hash_table_sync_t: Remove.

HASH_ASSERT_OWN(), hash_get_mutex(), hash_get_nth_mutex(): Remove.

ib_recreate(): Merge to the only caller, buf_pool_resize_hash().

ib_create(): Merge to the callers.

ha_clear(): Merge to the only caller buf_pool_t::close().

buf_pool_t::create(): Merge the ib_create() and
hash_create_sync_obj() invocations.

ha_insert_for_fold_func(): Clarify an assertion.

buf_pool_t::page_hash_lock(): Simplify the logic.

hash_assert_can_search(), hash_assert_can_modify(): Remove.
These predicates were only being invoked for the adaptive hash index,
while they only are effective for buf_pool.page_hash.

HASH_DELETE_AND_COMPACT(): Merge to ha_delete_hash_node().

hash_get_sync_obj_index(): Remove.

hash_table_t::heaps[], hash_get_nth_heap(): Remove. It was actually unused!

hash_get_heap(): Remove. It was only used in ha_delete_hash_node(),
where we always use hash_table_t::heap.

hash_table_t::calc_hash(): Replaces hash_calc_hash().
1 parent 08f6513 commit 9159b89

File tree

16 files changed

+164
-808
lines changed

16 files changed

+164
-808
lines changed

storage/innobase/btr/btr0sea.cc

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -387,9 +387,13 @@ void btr_search_enable(bool resize)
387387
ut_malloc(sizeof(hash_table_t*) * btr_ahi_parts, mem_key_ahi));
388388
for (ulint i = 0; i < btr_ahi_parts; ++i) {
389389
btr_search_sys->hash_tables[i] =
390-
ib_create((hash_size / btr_ahi_parts),
391-
LATCH_ID_HASH_TABLE_MUTEX,
392-
0, MEM_HEAP_FOR_BTR_SEARCH);
390+
hash_create(hash_size / btr_ahi_parts);
391+
btr_search_sys->hash_tables[i]->heap = mem_heap_create_typed(
392+
std::min<ulong>(4096,
393+
MEM_MAX_ALLOC_IN_BUF / 2
394+
- MEM_BLOCK_HEADER_SIZE
395+
- MEM_SPACE_NEEDED(0)),
396+
MEM_HEAP_FOR_BTR_SEARCH);
393397
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
394398
btr_search_sys->hash_tables[i]->adaptive = TRUE;
395399
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

storage/innobase/buf/buf0buf.cc

Lines changed: 46 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -1523,11 +1523,10 @@ bool buf_pool_t::create()
15231523
ut_a(srv_n_page_hash_locks != 0);
15241524
ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
15251525

1526-
page_hash= ib_create(2 * curr_size,
1527-
LATCH_ID_HASH_TABLE_RW_LOCK,
1528-
srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH);
1529-
1530-
ut_ad(!page_hash_old);
1526+
page_hash= hash_create(2 * curr_size);
1527+
for (auto i= srv_n_page_hash_locks; i--; )
1528+
rw_lock_create(hash_table_locks_key, &page_hash_latches[i],
1529+
SYNC_BUF_PAGE_HASH);
15311530
zip_hash= hash_create(2 * curr_size);
15321531
last_printout_time= time(NULL);
15331532

@@ -1605,7 +1604,8 @@ void buf_pool_t::close()
16051604

16061605
ut_free(chunks);
16071606
chunks= nullptr;
1608-
ha_clear(page_hash);
1607+
for (auto i= srv_n_page_hash_locks; i--; )
1608+
rw_lock_free(&page_hash_latches[i]);
16091609
hash_table_free(page_hash);
16101610
hash_table_free(zip_hash);
16111611

@@ -1924,78 +1924,44 @@ inline bool buf_pool_t::withdraw_blocks()
19241924
/** resize page_hash and zip_hash */
19251925
static void buf_pool_resize_hash()
19261926
{
1927-
hash_table_t* new_hash_table;
1928-
1929-
ut_ad(buf_pool.page_hash_old == NULL);
1930-
1931-
/* recreate page_hash */
1932-
new_hash_table = ib_recreate(
1933-
buf_pool.page_hash, 2 * buf_pool.curr_size);
1934-
1935-
for (ulint i = 0; i < hash_get_n_cells(buf_pool.page_hash); i++) {
1936-
buf_page_t* bpage;
1937-
1938-
bpage = static_cast<buf_page_t*>(
1939-
HASH_GET_FIRST(
1940-
buf_pool.page_hash, i));
1941-
1942-
while (bpage) {
1943-
buf_page_t* prev_bpage = bpage;
1944-
ulint fold;
1945-
1946-
ut_ad(bpage->in_page_hash);
1947-
bpage = static_cast<buf_page_t*>(
1948-
HASH_GET_NEXT(
1949-
hash, prev_bpage));
1950-
1951-
fold = prev_bpage->id().fold();
1952-
1953-
HASH_DELETE(buf_page_t, hash,
1954-
buf_pool.page_hash, fold,
1955-
prev_bpage);
1956-
1957-
HASH_INSERT(buf_page_t, hash,
1958-
new_hash_table, fold,
1959-
prev_bpage);
1960-
}
1961-
}
1962-
1963-
buf_pool.page_hash_old = buf_pool.page_hash;
1964-
buf_pool.page_hash = new_hash_table;
1965-
1966-
/* recreate zip_hash */
1967-
new_hash_table = hash_create(2 * buf_pool.curr_size);
1968-
1969-
for (ulint i = 0; i < hash_get_n_cells(buf_pool.zip_hash); i++) {
1970-
buf_page_t* bpage;
1971-
1972-
bpage = static_cast<buf_page_t*>(
1973-
HASH_GET_FIRST(buf_pool.zip_hash, i));
1927+
hash_table_t *new_hash_table= hash_create(2 * buf_pool.curr_size);
19741928

1975-
while (bpage) {
1976-
buf_page_t* prev_bpage = bpage;
1977-
ulint fold;
1978-
1979-
bpage = static_cast<buf_page_t*>(
1980-
HASH_GET_NEXT(
1981-
hash, prev_bpage));
1929+
for (ulint i= 0; i < hash_get_n_cells(buf_pool.page_hash); i++)
1930+
{
1931+
while (buf_page_t *bpage= static_cast<buf_page_t*>
1932+
(HASH_GET_FIRST(buf_pool.page_hash, i)))
1933+
{
1934+
buf_page_t *prev_bpage= bpage;
1935+
ut_ad(bpage->in_page_hash);
1936+
bpage= static_cast<buf_page_t*>(HASH_GET_NEXT(hash, prev_bpage));
1937+
const ulint fold= prev_bpage->id().fold();
1938+
HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, fold, prev_bpage);
1939+
HASH_INSERT(buf_page_t, hash, new_hash_table, fold, prev_bpage);
1940+
}
1941+
}
19821942

1983-
fold = BUF_POOL_ZIP_FOLD(
1984-
reinterpret_cast<buf_block_t*>(
1985-
prev_bpage));
1943+
std::swap(buf_pool.page_hash->array, new_hash_table->array);
1944+
buf_pool.page_hash->n_cells= new_hash_table->n_cells;
1945+
hash_table_free(new_hash_table);
19861946

1987-
HASH_DELETE(buf_page_t, hash,
1988-
buf_pool.zip_hash, fold,
1989-
prev_bpage);
1947+
/* recreate zip_hash */
1948+
new_hash_table= hash_create(2 * buf_pool.curr_size);
19901949

1991-
HASH_INSERT(buf_page_t, hash,
1992-
new_hash_table, fold,
1993-
prev_bpage);
1994-
}
1995-
}
1950+
for (ulint i= 0; i < hash_get_n_cells(buf_pool.zip_hash); i++)
1951+
{
1952+
while (buf_page_t *bpage= static_cast<buf_page_t*>
1953+
(HASH_GET_FIRST(buf_pool.zip_hash, i)))
1954+
{
1955+
buf_page_t *prev_bpage= bpage;
1956+
bpage= static_cast<buf_page_t*>(HASH_GET_NEXT(hash, prev_bpage));
1957+
const ulint fold= BUF_POOL_ZIP_FOLD_BPAGE(prev_bpage);
1958+
HASH_DELETE(buf_page_t, hash, buf_pool.zip_hash, fold, prev_bpage);
1959+
HASH_INSERT(buf_page_t, hash, new_hash_table, fold, prev_bpage);
1960+
}
1961+
}
19961962

1997-
hash_table_free(buf_pool.zip_hash);
1998-
buf_pool.zip_hash = new_hash_table;
1963+
hash_table_free(buf_pool.zip_hash);
1964+
buf_pool.zip_hash= new_hash_table;
19991965
}
20001966

20011967

@@ -2162,8 +2128,10 @@ inline void buf_pool_t::resize()
21622128
/* Indicate critical path */
21632129
resizing.store(true, std::memory_order_relaxed);
21642130

2165-
mutex_enter(&mutex);
2166-
page_hash_lock_all();
2131+
mutex_enter(&mutex);
2132+
for (auto i= srv_n_page_hash_locks; i--; )
2133+
rw_lock_x_lock(&page_hash_latches[i]);
2134+
21672135
chunk_t::map_reg = UT_NEW_NOKEY(chunk_t::map());
21682136

21692137
/* add/delete chunks */
@@ -2312,13 +2280,9 @@ inline void buf_pool_t::resize()
23122280
ib::info() << "hash tables were resized";
23132281
}
23142282

2315-
page_hash_unlock_all();
2316-
mutex_exit(&mutex);
2317-
2318-
if (page_hash_old != NULL) {
2319-
hash_table_free(page_hash_old);
2320-
page_hash_old = NULL;
2321-
}
2283+
mutex_exit(&mutex);
2284+
for (auto i= srv_n_page_hash_locks; i--; )
2285+
rw_lock_x_unlock(&page_hash_latches[i]);
23222286

23232287
UT_DELETE(chunk_map_old);
23242288

0 commit comments

Comments
 (0)