Skip to content

Commit c468d5c

Browse files
MDEV-15527 page_compressed compressed page partially during import tablespace
- The import-tablespace operation failed to punch holes in the filesystem when a page_compressed table was involved. Fix: InnoDB first punches a hole covering the whole IO buffer (1MB), and then, when page compression is in use, writes the data back page by page so that only the compressed payload of each page is rewritten and the holes are preserved.
1 parent dfda1c9 commit c468d5c

File tree

1 file changed

+74
-11
lines changed

1 file changed

+74
-11
lines changed

storage/innobase/row/row0import.cc

Lines changed: 74 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
/*****************************************************************************
22
33
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
4-
Copyright (c) 2015, 2020, MariaDB Corporation.
4+
Copyright (c) 2015, 2021, MariaDB Corporation.
55
66
This program is free software; you can redistribute it and/or modify it under
77
the terms of the GNU General Public License as published by the Free Software
@@ -3375,6 +3375,57 @@ struct fil_iterator_t {
33753375
byte* crypt_io_buffer; /*!< IO buffer when encrypted */
33763376
};
33773377

3378+
3379+
/** InnoDB writes page by page when there is page compressed
3380+
tablespace involved. It does help to save the disk space when
3381+
punch hole is enabled
3382+
@param iter Tablespace iterator
3383+
@param write_request Request to write into the file
3384+
@param offset offset of the file to be written
3385+
@param writeptr buffer to be written
3386+
@param n_bytes number of bytes to be written
3387+
@param try_punch_only Try the range punch only because the
3388+
current range is full of empty pages
3389+
@return DB_SUCCESS */
3390+
static
3391+
dberr_t fil_import_compress_fwrite(const fil_iterator_t &iter,
3392+
const IORequest &write_request,
3393+
ulint offset,
3394+
const byte *writeptr,
3395+
ulint n_bytes,
3396+
bool try_punch_only=false)
3397+
{
3398+
dberr_t err= os_file_punch_hole(iter.file, offset, n_bytes);
3399+
if (err != DB_SUCCESS || try_punch_only)
3400+
return err;
3401+
3402+
for (ulint j= 0; j < n_bytes; j+= srv_page_size)
3403+
{
3404+
/* Read the original data length from block and
3405+
safer to read FIL_PAGE_COMPRESSED_SIZE because it
3406+
is not encrypted*/
3407+
ulint n_write_bytes= srv_page_size;
3408+
if (j || offset)
3409+
{
3410+
n_write_bytes= mach_read_from_2(writeptr + j + FIL_PAGE_DATA);
3411+
const unsigned ptype= mach_read_from_2(writeptr + j + FIL_PAGE_TYPE);
3412+
/* Ignore the empty page */
3413+
if (ptype == 0 && n_write_bytes == 0)
3414+
continue;
3415+
n_write_bytes+= FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
3416+
if (ptype == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)
3417+
n_write_bytes+= FIL_PAGE_COMPRESSION_METHOD_SIZE;
3418+
}
3419+
3420+
err= os_file_write(write_request, iter.filepath, iter.file,
3421+
writeptr + j, offset + j, n_write_bytes);
3422+
if (err != DB_SUCCESS)
3423+
break;
3424+
}
3425+
3426+
return err;
3427+
}
3428+
33783429
/********************************************************************//**
33793430
TODO: This can be made parallel trivially by chunking up the file and creating
33803431
a callback per thread. . Main benefit will be to use multiple CPUs for
@@ -3416,7 +3467,10 @@ fil_iterate(
34163467
/* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless
34173468
copying for non-index pages. Unfortunately, it is
34183469
required by buf_zip_decompress() */
3419-
dberr_t err = DB_SUCCESS;
3470+
dberr_t err = DB_SUCCESS;
3471+
bool page_compressed = false;
3472+
bool punch_hole = true;
3473+
IORequest write_request(IORequest::WRITE);
34203474

34213475
for (offset = iter.start; offset < iter.end; offset += n_bytes) {
34223476
if (callback.is_interrupted()) {
@@ -3494,9 +3548,8 @@ fil_iterate(
34943548
goto func_exit;
34953549
}
34963550

3497-
const bool page_compressed
3498-
= fil_page_is_compressed_encrypted(src)
3499-
|| fil_page_is_compressed(src);
3551+
page_compressed= fil_page_is_compressed_encrypted(src)
3552+
|| fil_page_is_compressed(src);
35003553

35013554
if (page_compressed && block->page.zip.data) {
35023555
goto page_corrupted;
@@ -3651,13 +3704,23 @@ fil_iterate(
36513704
}
36523705
}
36533706

3654-
/* A page was updated in the set, write back to disk. */
3655-
if (updated) {
3656-
IORequest write_request(IORequest::WRITE);
3707+
if (page_compressed && punch_hole && srv_use_trim) {
3708+
err = fil_import_compress_fwrite(
3709+
iter, write_request, offset, writeptr, n_bytes,
3710+
!updated);
36573711

3658-
err = os_file_write(write_request,
3659-
iter.filepath, iter.file,
3660-
writeptr, offset, n_bytes);
3712+
if (err != DB_SUCCESS) {
3713+
punch_hole = false;
3714+
if (updated) {
3715+
goto normal_write;
3716+
}
3717+
}
3718+
} else if (updated) {
3719+
/* A page was updated in the set, write back to disk. */
3720+
normal_write:
3721+
err = os_file_write(
3722+
write_request, iter.filepath, iter.file,
3723+
writeptr, offset, n_bytes);
36613724

36623725
if (err != DB_SUCCESS) {
36633726
goto func_exit;

0 commit comments

Comments
 (0)