Skip to content

Commit

Permalink
MDEV-31835 Remove unnecessary extra HA_EXTRA_IGNORE_INSERT call
Browse files Browse the repository at this point in the history
- This commit is different from 10.6 commit c438284.
Since commit 045757a (MDEV-24621), InnoDB buffers and pre-sorts
the records for each index and builds the indexes one page at
a time.

Multiple large INSERT IGNORE statements abort the server during the bulk
insert operation. The problem is that an InnoDB merge record exceeds
the page size. To avoid this scenario, InnoDB should catch the
too-big record while buffering the insert operation itself.

row_merge_buf_encode(): returns length of the encoded index record

row_merge_buf_write(): Catches the DB_TOO_BIG_RECORD condition earlier and
returns the error
  • Loading branch information
Thirunarayanan committed Aug 25, 2023
1 parent afc64ea commit bf3b787
Show file tree
Hide file tree
Showing 12 changed files with 33 additions and 25 deletions.
4 changes: 1 addition & 3 deletions include/my_base.h
Expand Up @@ -218,9 +218,7 @@ enum ha_extra_function {
/** Start writing rows during ALTER TABLE...ALGORITHM=COPY. */
HA_EXTRA_BEGIN_ALTER_COPY,
/** Finish writing rows during ALTER TABLE...ALGORITHM=COPY. */
HA_EXTRA_END_ALTER_COPY,
/** IGNORE is being used for the insert statement */
HA_EXTRA_IGNORE_INSERT
HA_EXTRA_END_ALTER_COPY
};

/* Compatible option, to be deleted in 6.0 */
Expand Down
2 changes: 2 additions & 0 deletions mysql-test/suite/innodb/r/insert_into_empty,4k.rdiff
@@ -0,0 +1,2 @@
423a424
> ERROR 42000: Row size too large (> 1982). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.
8 changes: 8 additions & 0 deletions mysql-test/suite/innodb/t/insert_into_empty.test
Expand Up @@ -435,6 +435,14 @@ CREATE TABLE t1 (pk int primary key, c01 text, c02 text, c03 text,
SET GLOBAL INNODB_DEFAULT_ROW_FORMAT= COMPACT;
--replace_result 1982 8126 4030 8126
ALTER TABLE t1 FORCE;
let $page_size= `SELECT @@innodb_page_size`;
let $error_code = 0;

if ($page_size == 4096) {
let $error_code = ER_TOO_BIG_ROWSIZE;
}

--error $error_code
INSERT IGNORE INTO t1 VALUES
(1, REPEAT('x',4805), REPEAT('t',2211), REPEAT('u',974), REPEAT('e',871), REPEAT('z',224), REPEAT('j',978), REPEAT('n',190), REPEAT('t',888), REPEAT('x',32768), REPEAT('e',968), REPEAT('b',913), REPEAT('x',12107)),
(2, REPEAT('x',4805), REPEAT('t',2211), REPEAT('u',974), REPEAT('e',871), REPEAT('z',224), REPEAT('j',978), REPEAT('n',190), REPEAT('t',888), REPEAT('x',32768), REPEAT('e',968), REPEAT('b',913), REPEAT('x',12107));
Expand Down
1 change: 0 additions & 1 deletion sql/ha_partition.cc
Expand Up @@ -9479,7 +9479,6 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_STARTING_ORDERED_INDEX_SCAN:
case HA_EXTRA_BEGIN_ALTER_COPY:
case HA_EXTRA_END_ALTER_COPY:
case HA_EXTRA_IGNORE_INSERT:
DBUG_RETURN(loop_partitions(extra_cb, &operation));
default:
{
Expand Down
3 changes: 0 additions & 3 deletions sql/sql_insert.cc
Expand Up @@ -2210,9 +2210,6 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
goto after_trg_or_ignored_err;
}

/* Notify the engine about insert ignore operation */
if (info->handle_duplicates == DUP_ERROR && info->ignore)
table->file->extra(HA_EXTRA_IGNORE_INSERT);
after_trg_n_copied_inc:
info->copied++;
thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
Expand Down
3 changes: 0 additions & 3 deletions sql/sql_table.cc
Expand Up @@ -11841,9 +11841,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
}
else
{
/* In case of alter ignore, notify the engine about it. */
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_INSERT);
DEBUG_SYNC(thd, "copy_data_between_tables_before");
found_count++;
mysql_stage_set_work_completed(thd->m_stage_progress_psi, found_count);
Expand Down
1 change: 0 additions & 1 deletion sql/table.cc
Expand Up @@ -10088,7 +10088,6 @@ bool TR_table::update(ulonglong start_id, ulonglong end_id)
table->file->print_error(error, MYF(0));
/* extra() is used to apply the bulk insert operation
on mysql/transaction_registry table */
table->file->extra(HA_EXTRA_IGNORE_INSERT);
return error;
}

Expand Down
7 changes: 0 additions & 7 deletions storage/innobase/handler/ha_innodb.cc
Expand Up @@ -15679,13 +15679,6 @@ ha_innobase::extra(
case HA_EXTRA_RESET_STATE:
reset_template();
trx->duplicates = 0;
/* fall through */
case HA_EXTRA_IGNORE_INSERT:
/* HA_EXTRA_IGNORE_INSERT is very similar to
HA_EXTRA_IGNORE_DUP_KEY, but with one crucial difference:
we want !trx->duplicates for INSERT IGNORE so that
row_ins_duplicate_error_in_clust() will acquire a
shared lock instead of an exclusive lock. */
stmt_boundary:
trx->bulk_insert_apply();
trx->end_bulk_insert(*m_prebuilt->table);
Expand Down
7 changes: 7 additions & 0 deletions storage/innobase/include/trx0trx.h
Expand Up @@ -532,6 +532,13 @@ class trx_mod_table_time_t
{
return bulk_store && is_bulk_insert();
}

/** Free bulk insert operation */
void clear_bulk_buffer()
{
delete bulk_store;
bulk_store= nullptr;
}
};

/** Collection of persistent tables and their first modification
Expand Down
16 changes: 12 additions & 4 deletions storage/innobase/row/row0merge.cc
Expand Up @@ -281,10 +281,10 @@ row_merge_insert_index_tuples(
ut_stage_alter_t* stage= nullptr,
merge_file_t* blob_file= nullptr);

/******************************************************//**
Encode an index record. */
/** Encode an index record.
@return size of the record */
static MY_ATTRIBUTE((nonnull))
void
ulint
row_merge_buf_encode(
/*=================*/
byte** b, /*!< in/out: pointer to
Expand Down Expand Up @@ -315,6 +315,7 @@ row_merge_buf_encode(
entry->fields, n_fields);

*b += size;
return size;
}

static MY_ATTRIBUTE((malloc, nonnull))
Expand Down Expand Up @@ -1175,7 +1176,13 @@ dberr_t row_merge_buf_write(const row_merge_buf_t *buf,
}
}

row_merge_buf_encode(&b, index, entry, n_fields);
ulint rec_size= row_merge_buf_encode(
&b, index, entry, n_fields);
if (blob_file && rec_size > srv_page_size) {
err = DB_TOO_BIG_RECORD;
goto func_exit;
}

ut_ad(b < &block[srv_sort_buf_size]);

DBUG_LOG("ib_merge_sort",
Expand Down Expand Up @@ -5390,6 +5397,7 @@ dberr_t trx_t::bulk_insert_apply_low()
if (t.second.get_first() < low_limit)
low_limit= t.second.get_first();
delete t.second.bulk_store;
t.second.bulk_store= nullptr;
}
}
trx_savept_t bulk_save{low_limit};
Expand Down
3 changes: 3 additions & 0 deletions storage/innobase/trx/trx0roll.cc
Expand Up @@ -146,7 +146,10 @@ inline void trx_t::rollback_low(trx_savept_t *savept)
trx_mod_tables_t::iterator j= i++;
ut_ad(j->second.valid());
if (j->second.rollback(limit))
{
j->second.clear_bulk_buffer();
mod_tables.erase(j);
}
else if (!apply_online_log)
apply_online_log= j->first->is_active_ddl();
}
Expand Down
3 changes: 0 additions & 3 deletions storage/mroonga/ha_mroonga.cpp
Expand Up @@ -596,9 +596,6 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation)
inspected = "HA_EXTRA_NO_AUTOINC_LOCKING";
break;
#endif
case HA_EXTRA_IGNORE_INSERT:
inspected = "HA_EXTRA_IGNORE_INSERT";
break;
}
return inspected;
}
Expand Down

0 comments on commit bf3b787

Please sign in to comment.