@@ -97,10 +97,6 @@ static int sort_keyuse(KEYUSE *a,KEYUSE *b);
 static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
 static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
                                bool allow_full_scan, table_map used_tables);
-void best_access_path(JOIN *join, JOIN_TAB *s,
-                      table_map remaining_tables, uint idx,
-                      bool disable_jbuf, double record_count,
-                      POSITION *pos, POSITION *loose_scan_pos);
 static void optimize_straight_join(JOIN *join, table_map join_tables);
 static bool greedy_search(JOIN *join, table_map remaining_tables,
                           uint depth, uint prune_level,
@@ -4571,6 +4567,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
   {
     if (choose_plan(join, all_table_map & ~join->const_table_map))
       goto error;
+
+#ifdef HAVE_valgrind
+    // JOIN::positions holds the current query plan. We've already
+    // made the plan choice, so we should only use JOIN::best_positions
+    for (uint k=join->const_tables; k < join->table_count; k++)
+      MEM_UNDEFINED(&join->positions[k], sizeof(join->positions[k]));
+#endif
   }
   else
   {
@@ -6285,6 +6288,7 @@ void
 best_access_path(JOIN *join,
                  JOIN_TAB *s,
                  table_map remaining_tables,
+                 const POSITION *join_positions,
                  uint idx,
                  bool disable_jbuf,
                  double record_count,
@@ -6388,7 +6392,7 @@ best_access_path(JOIN *join,
           if (!(keyuse->used_tables & ~join->const_table_map))
             const_part|= keyuse->keypart_map;

-          double tmp2= prev_record_reads(join->positions, idx,
+          double tmp2= prev_record_reads(join_positions, idx,
                                          (found_ref | keyuse->used_tables));
           if (tmp2 < best_prev_record_reads)
           {
@@ -6429,7 +6433,7 @@ best_access_path(JOIN *join,
               Really, there should be records=0.0 (yes!)
               but 1.0 would be probably safer
             */
-            tmp= prev_record_reads(join->positions, idx, found_ref);
+            tmp= prev_record_reads(join_positions, idx, found_ref);
             records= 1.0;
           }
           else
@@ -6445,7 +6449,7 @@ best_access_path(JOIN *join,
             if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
                 MY_TEST(key_flags & HA_EXT_NOSAME))
             {
-              tmp = prev_record_reads(join->positions, idx, found_ref);
+              tmp = prev_record_reads(join_positions, idx, found_ref);
               records=1.0;
             }
             else
@@ -6689,7 +6693,8 @@ best_access_path(JOIN *join,
      }

      tmp= COST_ADD(tmp, s->startup_cost);
-      loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
+      loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp,
+                                            found_ref);
    } /* not ft_key */
    if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
    {
@@ -7367,7 +7372,8 @@ optimize_straight_join(JOIN *join, table_map join_tables)
   for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
   {
     /* Find the best access method from 's' to the current partial plan */
-    best_access_path(join, s, join_tables, idx, disable_jbuf, record_count,
+    best_access_path(join, s, join_tables, join->positions, idx,
+                     disable_jbuf, record_count,
                      join->positions + idx, &loose_scan_pos);

     /* compute the cost of the new plan extended with 's' */
@@ -8285,8 +8291,9 @@ best_extension_by_limited_search(JOIN *join,

       /* Find the best access method from 's' to the current partial plan */
       POSITION loose_scan_pos;
-      best_access_path(join, s, remaining_tables, idx, disable_jbuf,
-                       record_count, join->positions + idx, &loose_scan_pos);
+      best_access_path(join, s, remaining_tables, join->positions, idx,
+                       disable_jbuf, record_count, join->positions + idx,
+                       &loose_scan_pos);

       /* Compute the cost of extending the plan with 's' */
       current_record_count= COST_MULT(record_count, position->records_read);
@@ -8672,11 +8679,11 @@ cache_record_length(JOIN *join,uint idx)
 */

 double
-prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
+prev_record_reads(const POSITION *positions, uint idx, table_map found_ref)
 {
   double found=1.0;
-  POSITION *pos_end= positions - 1;
-  for (POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
+  const POSITION *pos_end= positions - 1;
+  for (const POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
   {
     if (pos->table->table->map & found_ref)
     {
@@ -15400,7 +15407,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
     if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
     {
       /* Find the best access method that would not use join buffering */
-      best_access_path(join, rs, reopt_remaining_tables, i,
+      best_access_path(join, rs, reopt_remaining_tables,
+                       join->positions, i,
                        TRUE, rec_count,
                        &pos, &loose_scan_pos);
     }