Skip to content
Permalink
Browse files
MDEV-8139: Clean up the freeing of B-tree pages
btr_page_free(): Renamed from btr_page_free_low().
If scrubbing is enabled, zero out the page with proper redo logging.
Only pass ahi=true to fseg_free_page() if the page is actually indexed.

fil_space_t::modify_check(): Renamed from fsp_space_modify_check().

fsp_init_file_page(): Define inline.
  • Loading branch information
dr-m committed Apr 8, 2019
1 parent e124ff1 commit 4b82211
Show file tree
Hide file tree
Showing 7 changed files with 98 additions and 218 deletions.
@@ -715,159 +715,57 @@ btr_page_free_for_ibuf(
mtr));
}

/**************************************************************//**
Frees a file page used in an index tree. Can also be used for (BLOB)
external storage pages. */
void
btr_page_free_low(
/*==============*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: block to be freed, x-latched */
ulint level, /*!< in: page level (ULINT_UNDEFINED=BLOB) */
bool blob, /*!< in: blob page */
mtr_t* mtr) /*!< in: mtr */
/** Free an index page.
@param[in,out] index index tree
@param[in,out] block block to be freed
@param[in,out] mtr mini-transaction
@param[in] blob whether this is freeing a BLOB page */
void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
bool blob)
{
fseg_header_t* seg_header;
page_t* root;

ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table));
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->index || !blob);
ut_ad(!block->index || page_is_leaf(block->frame));
#endif
ut_ad(index->space == block->page.id.space());
/* The root page is freed by btr_free_root(). */
ut_ad(block->page.id.page_no() != index->page);
ut_ad(mtr->is_named_space(index->space));

/* The page becomes invalid for optimistic searches: increment the frame
modify clock */

buf_block_modify_clock_inc(block);

if (blob) {
ut_a(level == 0);
}

bool scrub = srv_immediate_scrub_data_uncompressed;
/* scrub page */
if (scrub && blob) {
/* blob page: scrub entire page */
// TODO(jonaso): scrub only what is actually needed
page_t* page = buf_block_get_frame(block);
memset(page + PAGE_HEADER, 0,
UNIV_PAGE_SIZE - PAGE_HEADER);
#ifdef UNIV_DEBUG_SCRUBBING
fprintf(stderr,
"btr_page_free_low: scrub blob page %lu/%lu\n",
buf_block_get_space(block),
buf_block_get_page_no(block));
#endif /* UNIV_DEBUG_SCRUBBING */
} else if (scrub) {
/* scrub records on page */

/* TODO(jonaso): in theory we could clear full page
* but, since page still remains in buffer pool, and
* gets flushed etc. Lots of routines validates consistency
* of it. And in order to remain structurally consistent
* we clear each record by it own
*
* NOTE: The TODO below mentions removing page from buffer pool
* and removing redo entries, once that is done, clearing full
* pages should be possible
*/
uint cnt = 0;
ulint bytes = 0;
page_t* page = buf_block_get_frame(block);
mem_heap_t* heap = NULL;
ulint* offsets = NULL;
rec_t* rec = page_rec_get_next(page_get_infimum_rec(page));
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(rec, index, offsets,
page_is_leaf(page),
ULINT_UNDEFINED,
&heap);
ulint size = rec_offs_data_size(offsets);
memset(rec, 0, size);
rec = page_rec_get_next(rec);
cnt++;
bytes += size;
}
#ifdef UNIV_DEBUG_SCRUBBING
fprintf(stderr,
"btr_page_free_low: scrub %lu/%lu - "
"%u records " ULINTPF " bytes\n",
buf_block_get_space(block),
buf_block_get_page_no(block),
cnt, bytes);
#endif /* UNIV_DEBUG_SCRUBBING */
if (heap) {
mem_heap_free(heap);
}
}

#ifdef UNIV_DEBUG_SCRUBBING
if (scrub == false) {
fprintf(stderr,
"btr_page_free_low %lu/%lu blob: %u\n",
buf_block_get_space(block),
buf_block_get_page_no(block),
blob);
}
#endif /* UNIV_DEBUG_SCRUBBING */

if (dict_index_is_ibuf(index)) {

btr_page_free_for_ibuf(index, block, mtr);

return;
}

root = btr_root_get(index, mtr);

if (level == 0 || level == ULINT_UNDEFINED) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
} else {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP;
}

#ifdef UNIV_GIS_DEBUG
if (dict_index_is_spatial(index)) {
fprintf(stderr, "GIS_DIAG: Freed %ld\n",
(long) block->page.id.page_no());
}
#endif

if (scrub) {
/**
* Reset page type so that scrub thread won't try to scrub it
*/
mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE,
FIL_PAGE_TYPE_ALLOCATED, MLOG_2BYTES, mtr);
}

/* TODO: Discard any operations for block from mtr->log.
The page will be freed, so previous changes to it by this
mini-transaction should not matter. */
page_t* root = btr_root_get(index, mtr);
fseg_header_t* seg_header = &root[blob || page_is_leaf(block->frame)
? PAGE_HEADER + PAGE_BTR_SEG_LEAF
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
block->page.id.space(),
block->page.id.page_no(),
level != ULINT_UNDEFINED, mtr);
block->index != NULL, mtr);

/* The page was marked free in the allocation bitmap, but it
should remain buffer-fixed until mtr_commit(mtr) or until it
should remain exclusively latched until mtr_t::commit() or until it
is explicitly freed from the mini-transaction. */
ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table));
/* TODO: Discard any operations on the page from the redo log
and remove the block from the flush list and the buffer pool.
This would free up buffer pool earlier and reduce writes to
both the tablespace and the redo log. */
}

/**************************************************************//**
Frees a file page used in an index tree. NOTE: cannot free field external
storage pages because the page must contain info on its level. */
void
btr_page_free(
/*==========*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: block to be freed, x-latched */
mtr_t* mtr) /*!< in: mtr */
{
const page_t* page = buf_block_get_frame(block);
ulint level = btr_page_get_level(page, mtr);

ut_ad(fil_page_index_page_check(block->frame));
ut_ad(level != ULINT_UNDEFINED);
btr_page_free_low(index, block, level, false, mtr);
if (srv_immediate_scrub_data_uncompressed) {
/* In MDEV-15528 this call must be removed, and we should
zero out the page after the redo log for this mini-transaction
has been durably written. */
fsp_init_file_page(fil_space_get(index->space), block, mtr);
}
}

/**************************************************************//**
@@ -1037,7 +1037,7 @@ BtrBulk::finish(dberr_t err)
root_page_bulk.copyIn(first_rec);

/* Remove last page. */
btr_page_free_low(m_index, last_block, m_root_level, false, &mtr);
btr_page_free(m_index, last_block, &mtr);

/* Do not flush the last page. */
last_block->page.flush_observer = NULL;
@@ -7484,8 +7484,7 @@ btr_free_externally_stored_field(
}
next_page_no = mach_read_from_4(page + FIL_PAGE_NEXT);

btr_page_free_low(index, ext_block, 0,
true, &mtr);
btr_page_free(index, ext_block, &mtr, true);

if (page_zip != NULL) {
mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO,
@@ -7511,12 +7510,7 @@ btr_free_externally_stored_field(
next_page_no = mach_read_from_4(
page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO);

/* We must supply the page level (= 0) as an argument
because we did not store it on the page (saving the
space overhead of an index page header). */
btr_page_free_low(index, ext_block, 0,
true, &mtr);
btr_page_free(index, ext_block, &mtr, true);

mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO,
next_page_no,

0 comments on commit 4b82211

Please sign in to comment.