cleanup: Rename Sort_param::max_rows to limit_rows
This makes the code easier to read, as the intent of the parameter is
clearer.

Reviewer: Monty
cvicentiu authored and spetrunia committed Feb 2, 2023
1 parent 488148d commit fa90ac6
Showing 3 changed files with 18 additions and 18 deletions.
sql/filesort.cc: 16 additions & 16 deletions

@@ -126,7 +126,7 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
     sort_length+= ref_length;
   }
   rec_length= sort_length + addon_length;
-  max_rows= maxrows;
+  limit_rows= maxrows;
 }


@@ -204,7 +204,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
   bool allow_packing_for_sortkeys;
   Bounded_queue<uchar, uchar> pq;
   SQL_SELECT *const select= filesort->select;
-  ha_rows max_rows= filesort->limit;
+  ha_rows limit_rows= filesort->limit;
   uint s_length= 0, sort_len;
   Sort_keys *sort_keys;
   DBUG_ENTER("filesort");
@@ -249,7 +249,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
 
   param.sort_keys= sort_keys;
   sort_len= sortlength(thd, sort_keys, &allow_packing_for_sortkeys);
-  param.init_for_filesort(sort_len, table, max_rows, filesort);
+  param.init_for_filesort(sort_len, table, limit_rows, filesort);
   if (!param.accepted_rows)
     param.accepted_rows= &not_used;

@@ -264,7 +264,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
   else
     thd->inc_status_sort_scan();
   thd->query_plan_flags|= QPLAN_FILESORT;
-  tracker->report_use(thd, max_rows);
+  tracker->report_use(thd, limit_rows);
 
   // If number of rows is not known, use as much of sort buffer as possible.
   num_rows= table->file->estimate_rows_upper_bound();
@@ -286,7 +286,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
       point in doing lazy initialization).
     */
     sort->init_record_pointers();
-    if (pq.init(param.max_rows,
+    if (pq.init(param.limit_rows,
                 true,                           // max_at_top
                 NULL,                           // compare_function
                 compare_length,
@@ -431,10 +431,10 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
     goto err;
   }
 
-  if (num_rows > param.max_rows)
+  if (num_rows > param.limit_rows)
   {
     // If find_all_keys() produced more results than the query LIMIT.
-    num_rows= param.max_rows;
+    num_rows= param.limit_rows;
   }
   error= 0;

@@ -1073,8 +1073,8 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
     DBUG_RETURN(1);
 
   buffpek.set_file_position(my_b_tell(tempfile));
-  if ((ha_rows) count > param->max_rows)
-    count=(uint) param->max_rows;               /* purecov: inspected */
+  if ((ha_rows) count > param->limit_rows)
+    count=(uint) param->limit_rows;             /* purecov: inspected */
   buffpek.set_rowcount(static_cast<ha_rows>(count));
 
   for (uint ix= 0; ix < count; ++ix)
@@ -1538,13 +1538,13 @@ static bool check_if_pq_applicable(Sort_param *param,
   */
   const double PQ_slowness= 3.0;
 
-  if (param->max_rows == HA_POS_ERROR)
+  if (param->limit_rows == HA_POS_ERROR)
   {
     DBUG_PRINT("info", ("No LIMIT"));
     DBUG_RETURN(false);
   }
 
-  if (param->max_rows >= UINT_MAX - 2)
+  if (param->limit_rows >= UINT_MAX - 2)
   {
     DBUG_PRINT("info", ("Too large LIMIT"));
     DBUG_RETURN(false);
@@ -1553,12 +1553,12 @@
   size_t num_available_keys=
     memory_available / (param->rec_length + sizeof(char*));
   // We need 1 extra record in the buffer, when using PQ.
-  param->max_keys_per_buffer= (uint) param->max_rows + 1;
+  param->max_keys_per_buffer= (uint) param->limit_rows + 1;
 
   if (num_rows < num_available_keys)
   {
     // The whole source set fits into memory.
-    if (param->max_rows < num_rows/PQ_slowness )
+    if (param->limit_rows < num_rows/PQ_slowness )
     {
       filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
                                        param->rec_length);
@@ -1606,7 +1606,7 @@ static bool check_if_pq_applicable(Sort_param *param,
     (PQ_slowness * num_rows + param->max_keys_per_buffer) *
     log((double) param->max_keys_per_buffer) *
     ROWID_COMPARE_COST_THD(table->in_use);
-  const double pq_io_cost= table->file->ha_rnd_pos_time(param->max_rows);
+  const double pq_io_cost= table->file->ha_rnd_pos_time(param->limit_rows);
   const double pq_cost= pq_cpu_cost + pq_io_cost;
 
   if (sort_merge_cost < pq_cost)
@@ -1863,7 +1863,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
   maxcount= (ulong) (param->max_keys_per_buffer/((uint) (Tb-Fb) +1));
   to_start_filepos= my_b_tell(to_file);
   strpos= sort_buffer.array();
-  org_max_rows=max_rows= param->max_rows;
+  org_max_rows= max_rows= param->limit_rows;
 
   set_if_bigger(maxcount, 1);

@@ -2084,7 +2084,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
           bytes_read != 0);
 
 end:
-  lastbuff->set_rowcount(MY_MIN(org_max_rows-max_rows, param->max_rows));
+  lastbuff->set_rowcount(MY_MIN(org_max_rows - max_rows, param->limit_rows));
   lastbuff->set_file_position(to_start_filepos);
 
 cleanup:
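
The renamed limit_rows is what enables the bounded-priority-queue path seen in the hunks above: check_if_pq_applicable() bails out when there is no LIMIT (HA_POS_ERROR) or the LIMIT is too large, sizes the queue at limit_rows + 1 keys ("We need 1 extra record in the buffer"), and weighs pq_cpu_cost + pq_io_cost against a full sort-merge before pq.init(param.limit_rows, ...) is ever reached. For readers unfamiliar with the technique, here is a minimal, self-contained sketch of top-N selection with a bounded max-heap. It illustrates the general idea only, not MariaDB's Bounded_queue API; the Row type, the int sort_key, and the top_n helper are all hypothetical.

#include <algorithm>
#include <cstddef>
#include <queue>
#include <vector>

// Hypothetical row type; in filesort the key would be the packed sort key.
struct Row { int sort_key; };

// Keep the limit_rows best (smallest sort_key) rows of `input`, in order.
std::vector<Row> top_n(const std::vector<Row> &input, std::size_t limit_rows)
{
  if (limit_rows == 0)
    return {};                           // nothing to keep

  // Max-heap on sort_key: pq.top() is the worst row currently kept.
  auto cmp= [](const Row &a, const Row &b) { return a.sort_key < b.sort_key; };
  std::priority_queue<Row, std::vector<Row>, decltype(cmp)> pq(cmp);

  for (const Row &row : input)
  {
    if (pq.size() < limit_rows)
      pq.push(row);                      // heap not yet full: always keep
    else if (row.sort_key < pq.top().sort_key)
    {
      pq.pop();                          // evict the worst kept row
      pq.push(row);                      // and keep the better one instead
    }
  }

  // Drain worst-first, then reverse to get ascending ORDER BY output.
  std::vector<Row> result;
  result.reserve(pq.size());
  for (; !pq.empty(); pq.pop())
    result.push_back(pq.top());
  std::reverse(result.begin(), result.end());
  return result;
}

With a heap of n = limit_rows entries, each of the num_rows input rows costs at most O(log n) comparisons, which is the trade-off the pq_cpu_cost formula above prices against a full sort-merge.
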
sql/sql_sort.h: 1 addition & 1 deletion

@@ -548,7 +548,7 @@ class Sort_param {
   uint res_length;            // Length of records in final sorted file/buffer.
   uint max_keys_per_buffer;   // Max keys / buffer.
   uint min_dupl_count;
-  ha_rows max_rows;           // Select limit, or HA_POS_ERROR if unlimited.
+  ha_rows limit_rows;         // Select limit, or HA_POS_ERROR if unlimited.
   ha_rows examined_rows;      // Number of examined rows.
   TABLE *sort_form;           // For quicker make_sortkey.
   /**
sql/uniques.cc: 1 addition & 1 deletion

@@ -721,7 +721,7 @@ bool Unique::merge(TABLE *table, uchar *buff, size_t buff_size,
     return 1;
 
   bzero((char*) &sort_param,sizeof(sort_param));
-  sort_param.max_rows= elements;
+  sort_param.limit_rows= elements;
   sort_param.sort_form= table;
   sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
     full_size;
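
A note on this last hunk: Unique::merge() passes elements, the total number of rows being merged, as limit_rows, so the MY_MIN(org_max_rows - max_rows, param->limit_rows) clamp in merge_buffers() never truncates the output here; the "limit" is simply set high enough to keep every row.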
