Remove dict_table_t::big_rows
The field dict_table_t::big_rows was only used for determining if
the adaptive hash index should be used when the internal InnoDB SQL
parser is used. That parser is only used for modifying the InnoDB
data dictionary, updating persistent tables, and for fulltext indexes.
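
For reference, the removed heuristic can be summarized as follows. This is a minimal self-contained sketch, not the committed InnoDB code: it uses simplified types and an assumed 1024-byte threshold standing in for BIG_ROW_SIZE, and it sums the per-column maximum sizes, flagging the table as soon as the running total, or any single column, reaches the threshold.

#include <cstddef>
#include <vector>

// Assumed threshold; stands in for InnoDB's BIG_ROW_SIZE.
static const std::size_t BIG_ROW_SIZE_SKETCH = 1024;

// Returns true if the maximum possible row length, computed from the
// per-column maximum sizes, reaches the threshold. A single unbounded
// column, or several large ones, is enough; we can stop early then.
bool has_big_rows(const std::vector<std::size_t>& max_col_sizes)
{
	std::size_t row_len = 0;
	for (std::size_t col_len : max_col_sizes) {
		row_len += col_len;
		if (row_len >= BIG_ROW_SIZE_SKETCH
		    || col_len >= BIG_ROW_SIZE_SKETCH) {
			return true;
		}
	}
	return false;
}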
dr-m committed Oct 2, 2017
1 parent d6f857d commit cc3057f
Showing 4 changed files with 2 additions and 42 deletions.
27 changes: 0 additions & 27 deletions storage/innobase/dict/dict0dict.cc
@@ -1275,31 +1275,6 @@ dict_table_add_system_columns(
 #endif
 }
 
-/** Mark if table has big rows.
-@param[in,out]	table	table handler */
-void
-dict_table_set_big_rows(
-	dict_table_t*	table)
-{
-	ulint	row_len = 0;
-	for (ulint i = 0; i < table->n_def; i++) {
-		ulint	col_len = dict_col_get_max_size(
-			dict_table_get_nth_col(table, i));
-
-		row_len += col_len;
-
-		/* If we have a single unbounded field, or several gigantic
-		fields, mark the maximum row size as BIG_ROW_SIZE. */
-		if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
-			row_len = BIG_ROW_SIZE;
-
-			break;
-		}
-	}
-
-	table->big_rows = (row_len >= BIG_ROW_SIZE) ? TRUE : FALSE;
-}
-
 /**********************************************************************//**
 Adds a table object to the dictionary cache. */
 void
@@ -1322,8 +1297,6 @@ dict_table_add_to_cache(
 	fold = ut_fold_string(table->name.m_name);
 	id_fold = ut_fold_ull(table->id);
 
-	dict_table_set_big_rows(table);
-
 	/* Look for a table with the same name: error if such exists */
 	{
 		dict_table_t*	table2;
7 changes: 0 additions & 7 deletions storage/innobase/include/dict0dict.h
@@ -386,13 +386,6 @@ dict_table_add_system_columns(
 	dict_table_t*	table,	/*!< in/out: table */
 	mem_heap_t*	heap)	/*!< in: temporary heap */
 	MY_ATTRIBUTE((nonnull));
-
-/** Mark if table has big rows.
-@param[in,out]	table	table handler */
-void
-dict_table_set_big_rows(
-	dict_table_t*	table)
-	MY_ATTRIBUTE((nonnull));
 /**********************************************************************//**
 Adds a table object to the dictionary cache. */
 void
4 changes: 0 additions & 4 deletions storage/innobase/include/dict0mem.h
@@ -1493,10 +1493,6 @@ struct dict_table_t {
 	/*!< set of foreign key constraints which refer to this table */
 	dict_foreign_set	referenced_set;
 
-	/** TRUE if the maximum length of a single row exceeds BIG_ROW_SIZE.
-	Initialized in dict_table_add_to_cache(). */
-	unsigned	big_rows:1;
-
 	/** Statistics for query optimization. @{ */
 
 	/** Creation state of 'stats_latch'. */
6 changes: 2 additions & 4 deletions storage/innobase/row/row0sel.cc
@@ -1657,8 +1657,7 @@ row_sel(
 
 #ifdef BTR_CUR_HASH_ADAPT
 	if (consistent_read && plan->unique_search && !plan->pcur_is_open
-	    && !plan->must_get_clust
-	    && !plan->table->big_rows) {
+	    && !plan->must_get_clust) {
 		if (!search_latch_locked) {
 			btr_search_s_lock(index);
 
@@ -2085,8 +2084,7 @@
 	ut_ad(plan->pcur.latch_mode == BTR_SEARCH_LEAF);
 
 	if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
-	    || plan->unique_search || plan->no_prefetch
-	    || plan->table->big_rows) {
+	    || plan->unique_search || plan->no_prefetch) {
 
 		/* No prefetch in operation: go to the next table */
 
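
The net effect in row_sel() is that both guards lose one term: the adaptive hash index may now be tried, and prefetch is no longer suppressed, regardless of the table's maximum row size. A minimal sketch of the two simplified predicates, using hypothetical flattened types and an assumed value for SEL_PREFETCH_LIMIT:

// Hypothetical flattening of the plan fields used by the two checks.
struct PlanSketch {
	bool		unique_search;
	bool		pcur_is_open;
	bool		must_get_clust;
	bool		no_prefetch;
	unsigned	n_rows_fetched;
};

static const unsigned SEL_PREFETCH_LIMIT_SKETCH = 1;	// assumed value

// Adaptive hash index guard after this commit; the removed term was
// "&& !plan->table->big_rows".
bool may_try_hash_search(const PlanSketch& p, bool consistent_read)
{
	return consistent_read && p.unique_search && !p.pcur_is_open
		&& !p.must_get_clust;
}

// Prefetch guard after this commit; the removed term was
// "|| plan->table->big_rows".
bool skip_prefetch(const PlanSketch& p)
{
	return p.n_rows_fetched <= SEL_PREFETCH_LIMIT_SKETCH
		|| p.unique_search || p.no_prefetch;
}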