 #include "sql/psi_memory_key.h"
 #include "sql/row_iterator.h"
 #include "sql/sort_param.h"
+#include "sql/sorting_iterator.h"
 #include "sql/sql_array.h"
 #include "sql/sql_base.h"
 #include "sql/sql_bitmap.h"
@@ -347,12 +348,13 @@ static void trace_filesort_information(Opt_trace_context *trace,
   in sorted order. This should be done with the functions
   in records.cc.
 
-  The result set is stored in table->sort.io_cache or
-  table->sort.sorted_result, or left in the main filesort buffer.
+  The result set is stored in fs_info->io_cache or
+  fs_info->sorted_result, or left in the main filesort buffer.
 
   @param      thd             Current thread
   @param      filesort        How to sort the table
   @param      source_iterator Where to read the rows to be sorted from.
+  @param      fs_info         Owns the buffers for sort_result.
   @param      sort_result     Where to store the sort result.
   @param[out] found_rows      Store the number of found rows here.
                               This is the number of found rows after
@@ -366,7 +368,8 @@ static void trace_filesort_information(Opt_trace_context *trace,
 */
 
 bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
-              Sort_result *sort_result, ha_rows *found_rows) {
+              Filesort_info *fs_info, Sort_result *sort_result,
+              ha_rows *found_rows) {
   int error;
   ulong memory_available = thd->variables.sortbuff_size;
   ha_rows num_rows_found = HA_POS_ERROR;
@@ -424,7 +427,7 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
                            table, thd->variables.max_length_for_sort_data,
                            max_rows, filesort->m_remove_duplicates);
 
-  table->sort.addon_fields = param->addon_fields;
+  fs_info->addon_fields = param->addon_fields;
 
   /*
     TODO: Now that we read from RowIterators, the situation is a lot more
@@ -457,7 +460,7 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
   // However, do note this cannot change the addon fields status,
   // so that we at least know that when checking whether we can skip
   // in-between temporary tables (StreamingIterator).
-  if (check_if_pq_applicable(trace, param, &table->sort, num_rows_estimate,
+  if (check_if_pq_applicable(trace, param, fs_info, num_rows_estimate,
                              memory_available)) {
     DBUG_PRINT("info", ("filesort PQ is applicable"));
     /*
@@ -466,19 +469,19 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
       all pointers here. (We cannot pack fields anyways, so there is no
       point in doing incremental allocation).
     */
-    if (table->sort.preallocate_records(param->max_rows_per_buffer)) {
+    if (fs_info->preallocate_records(param->max_rows_per_buffer)) {
       my_error(ER_OUT_OF_SORTMEMORY, ME_FATALERROR);
       LogErr(ERROR_LEVEL, ER_SERVER_OUT_OF_SORTMEMORY);
       goto err;
     }
 
-    if (pq.init(param->max_rows, param, table->sort.get_sort_keys())) {
+    if (pq.init(param->max_rows, param, fs_info->get_sort_keys())) {
       /*
         If we fail to init pq, we have to give up:
         out of memory means my_malloc() will call my_error().
       */
       DBUG_PRINT("info", ("failed to allocate PQ"));
-      table->sort.free_sort_buffer();
+      fs_info->free_sort_buffer();
       DBUG_ASSERT(thd->is_error());
       goto err;
     }
@@ -506,7 +509,7 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
     param->max_rows_per_buffer =
         min(num_rows_estimate > 0 ? num_rows_estimate : 1, keys);
 
-    table->sort.set_max_size(memory_available, param->max_record_length());
+    fs_info->set_max_size(memory_available, param->max_record_length());
   }
 
   param->sort_form = table;
@@ -515,10 +518,9 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
   // New scope, because subquery execution must be traced within an array.
   {
     Opt_trace_array ota(trace, "filesort_execution");
-    num_rows_found =
-        read_all_rows(thd, param, qep_tab, &table->sort, &chunk_file, &tempfile,
-                      param->using_pq ? &pq : nullptr, source_iterator,
-                      found_rows, &longest_key);
+    num_rows_found = read_all_rows(thd, param, qep_tab, fs_info, &chunk_file,
+                                   &tempfile, param->using_pq ? &pq : nullptr,
+                                   source_iterator, found_rows, &longest_key);
     if (num_rows_found == HA_POS_ERROR) goto err;
   }
 
@@ -536,7 +538,7 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
   {
     ha_rows rows_in_chunk =
         param->using_pq ? pq.num_elements() : num_rows_found;
-    if (save_index(param, rows_in_chunk, &table->sort, sort_result)) goto err;
+    if (save_index(param, rows_in_chunk, fs_info, sort_result)) goto err;
   } else {
     // If deduplicating, we'll need to remember the previous key somehow.
     if (filesort->m_remove_duplicates) {
@@ -545,12 +547,12 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
     }
 
     // We will need an extra buffer in SortFileIndirectIterator
-    if (table->sort.addon_fields != nullptr &&
-        !(table->sort.addon_fields->allocate_addon_buf(param->m_addon_length)))
+    if (fs_info->addon_fields != nullptr &&
+        !(fs_info->addon_fields->allocate_addon_buf(param->m_addon_length)))
       goto err; /* purecov: inspected */
 
-    table->sort.read_chunk_descriptors(&chunk_file, num_chunks);
-    if (table->sort.merge_chunks.is_null()) goto err; /* purecov: inspected */
+    fs_info->read_chunk_descriptors(&chunk_file, num_chunks);
+    if (fs_info->merge_chunks.is_null()) goto err; /* purecov: inspected */
 
     close_cached_file(&chunk_file);
 
@@ -562,23 +564,23 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
     if (reinit_io_cache(outfile, WRITE_CACHE, 0L, 0, 0)) goto err;
 
     param->max_rows_per_buffer = static_cast<uint>(
-        table->sort.max_size_in_bytes() / param->max_record_length());
+        fs_info->max_size_in_bytes() / param->max_record_length());
 
-    Bounds_checked_array<uchar> merge_buf = table->sort.get_contiguous_buffer();
+    Bounds_checked_array<uchar> merge_buf = fs_info->get_contiguous_buffer();
     if (merge_buf.array() == nullptr) {
       my_error(ER_OUT_OF_SORTMEMORY, ME_FATALERROR);
       LogErr(ERROR_LEVEL, ER_SERVER_OUT_OF_SORTMEMORY);
       goto err;
     }
-    if (merge_many_buff(thd, param, merge_buf, table->sort.merge_chunks,
+    if (merge_many_buff(thd, param, merge_buf, fs_info->merge_chunks,
                         &num_chunks, &tempfile))
       goto err;
     if (flush_io_cache(&tempfile) ||
         reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0))
       goto err;
     if (merge_index(
             thd, param, merge_buf,
-            Merge_chunk_array(table->sort.merge_chunks.begin(), num_chunks),
+            Merge_chunk_array(fs_info->merge_chunks.begin(), num_chunks),
             &tempfile, outfile))
       goto err;
 
@@ -611,7 +613,7 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
         .add("num_rows_estimate", num_rows_estimate)
         .add("num_rows_found", num_rows_found)
         .add("num_initial_chunks_spilled_to_disk", num_initial_chunks)
-        .add("peak_memory_used", table->sort.peak_memory_used())
+        .add("peak_memory_used", fs_info->peak_memory_used())
         .add_alnum("sort_algorithm", algo_text[param->m_sort_algorithm]);
     if (!param->using_packed_addons())
       filesort_summary.add_alnum(
@@ -628,9 +630,9 @@ bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator,
 
 err:
   if (!subselect || !subselect->is_uncacheable()) {
-    if (!sort_result->sorted_result_in_fsbuf) table->sort.free_sort_buffer();
-    my_free(table->sort.merge_chunks.array());
-    table->sort.merge_chunks = Merge_chunk_array(NULL, 0);
+    if (!sort_result->sorted_result_in_fsbuf) fs_info->free_sort_buffer();
+    my_free(fs_info->merge_chunks.array());
+    fs_info->merge_chunks = Merge_chunk_array(NULL, 0);
   }
   close_cached_file(&tempfile);
   close_cached_file(&chunk_file);
@@ -689,10 +691,12 @@ void filesort_free_buffers(TABLE *table, bool full) {
   table->unique_result.sorted_result_in_fsbuf = false;
 
   if (full) {
-    table->sort.free_sort_buffer();
-    my_free(table->sort.merge_chunks.array());
-    table->sort.merge_chunks = Merge_chunk_array(NULL, 0);
-    table->sort.addon_fields = NULL;
+    if (table->sorting_iterator != nullptr) {
+      table->sorting_iterator->CleanupAfterQuery();
+    }
+    if (table->duplicate_removal_iterator != nullptr) {
+      table->duplicate_removal_iterator->CleanupAfterQuery();
+    }
   }
 }
 
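With this change, filesort() no longer reaches into table->sort; the caller passes the Filesort_info explicitly. A minimal call-site sketch under that assumption follows. The names m_filesort, m_fs_info, m_sort_result and m_source_iterator are hypothetical members of the calling iterator, not taken from this commit.

```cpp
// Sketch only: assumes server-internal context (thd is the current THD,
// m_filesort describes the sort, m_source_iterator yields the input rows).
ha_rows found_rows = 0;
if (filesort(thd, m_filesort, m_source_iterator, &m_fs_info, &m_sort_result,
             &found_rows)) {
  return true;  // filesort() has already raised the error on the THD
}
```

Cleanup of the buffers also goes through the owning iterators now: as the filesort_free_buffers() hunk shows, table->sorting_iterator->CleanupAfterQuery() and table->duplicate_removal_iterator->CleanupAfterQuery() replace the direct frees of table->sort.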