@@ -4182,7 +4182,7 @@ TRANSACTIONAL_TARGET static bool lock_release_try(trx_t *trx)
4182
4182
ulint count= 1000 ;
4183
4183
/* We will not attempt hardware lock elision (memory transaction)
4184
4184
here. Both lock_rec_dequeue_from_page() and lock_table_dequeue()
4185
- would likely lead to a memory transaction due to a system call, to
4185
+ would likely lead to a memory transaction abort due to a system call, to
4186
4186
wake up a waiting transaction. */
4187
4187
lock_sys.rd_lock (SRW_LOCK_CALL);
4188
4188
trx->mutex_lock ();
@@ -4352,28 +4352,82 @@ void lock_release_on_drop(trx_t *trx)
4352
4352
}
4353
4353
}
4354
4354
4355
/** Reset a lock bit and rebuild the waiting queue for that record.
@param cell     rec hash cell of in_lock
@param lock     the lock with the bit for heap_no set
@param heap_no  heap number of the record whose lock bit is reset */
static void lock_rec_unlock(hash_cell_t &cell, lock_t *lock, ulint heap_no)
{
  /* The bit must still be set; we are the one clearing it below. */
  ut_ad(lock_rec_get_nth_bit(lock, heap_no));
#ifdef SAFE_MUTEX
  /* lock_sys.wait_mutex must not be held here; the queue rebuild may
  need to acquire it. */
  ut_ad(!mysql_mutex_is_owner(&lock_sys.wait_mutex));
#endif /* SAFE_MUTEX */
  ut_ad(!lock->is_table());
  /* Caller holds exclusive lock_sys latch, or the lock owner's trx mutex. */
  ut_ad(lock_sys.is_writer() || lock->trx->mutex_is_owner());

  lock_rec_reset_nth_bit(lock, heap_no);

  /* Re-scan the queue from the first lock on this (page, heap_no) so that
  waiters blocked by the bit we just cleared can be granted. */
  lock_t *first_lock=
    lock_sys_t::get_first(cell, lock->un_member.rec_lock.page_id, heap_no);

  lock_rec_rebuild_waiting_queue(
#if defined(UNIV_DEBUG) || !defined(DBUG_OFF)
    lock->trx,
#endif /* defined(UNIV_DEBUG) || !defined(DBUG_OFF) */
    cell, first_lock, heap_no);
}
4378
+
4379
/** Release locks to unmodified records on a clustered index page.
@param cell    lock_sys.rec_hash cell of lock
@param lock    record lock
@param offsets storage for rec_get_offsets()
@param heap    storage for rec_get_offsets()
@param mtr     mini-transaction (will be started and committed) */
static void lock_rec_unlock_unmodified(hash_cell_t &cell, lock_t *lock,
                                       rec_offs *&offsets, mem_heap_t *&heap,
                                       mtr_t &mtr)
{
  ut_ad(!lock->is_waiting());

  dict_index_t *const index= lock->index;

  mtr.start();
  /* Latch the page the lock refers to; if the page is no longer
  available, there is nothing to release. */
  if (buf_block_t *block=
      btr_block_get(*index, lock->un_member.rec_lock.page_id.page_no(),
                    RW_S_LATCH, true, &mtr))
  {
    if (UNIV_UNLIKELY(!page_is_leaf(block->page.frame)))
    {
      /* Record locks must refer to leaf pages only. */
      ut_ad("corrupted lock system" == 0);
      goto func_exit;
    }

    for (ulint i= PAGE_HEAP_NO_USER_LOW; i < lock_rec_get_n_bits(lock); ++i)
    {
      /* Deliberate empty branch: skip heap numbers whose bit is not set. */
      if (!lock_rec_get_nth_bit(lock, i));
      else if (const rec_t *rec=
               page_find_rec_with_heap_no(block->page.frame, i))
      {
        if (index->is_clust())
        {
          /* In the clustered index, the record header carries the id of
          the transaction that last modified it: if it is ours, we did
          modify this record and must keep the lock. */
          if (trx_read_trx_id(rec + row_trx_id_offset(rec, index)) ==
              lock->trx->id)
            continue;
        unlock_rec:
          lock_rec_unlock(cell, lock, i);
        }
        else
        {
          /* For a secondary index record, detect modification via an
          implicit lock check; release only if we hold no implicit lock. */
          offsets= rec_get_offsets(rec, index, offsets, index->n_core_fields,
                                   ULINT_UNDEFINED, &heap);
          if (lock->trx !=
              lock_sec_rec_some_has_impl(lock->trx, rec, index, offsets))
            goto unlock_rec;
        }
      }
    }
  }
func_exit:
  mtr.commit();
}
4378
4432
4379
4433
/* * Release non-exclusive locks on XA PREPARE,
@@ -4391,6 +4445,12 @@ static bool lock_release_on_prepare_try(trx_t *trx)
4391
4445
DBUG_ASSERT (trx->state == TRX_STATE_PREPARED);
4392
4446
4393
4447
bool all_released= true ;
4448
+ mtr_t mtr;
4449
+ rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
4450
+ rec_offs *offsets= offsets_;
4451
+ mem_heap_t *heap= nullptr ;
4452
+ rec_offs_init (offsets_);
4453
+
4394
4454
lock_sys.rd_lock (SRW_LOCK_CALL);
4395
4455
trx->mutex_lock ();
4396
4456
@@ -4407,20 +4467,24 @@ static bool lock_release_on_prepare_try(trx_t *trx)
4407
4467
if (!lock->is_table ())
4408
4468
{
4409
4469
ut_ad (!lock->index ->table ->is_temporary ());
4410
- bool supremum_bit = lock_rec_get_nth_bit (lock, PAGE_HEAP_NO_SUPREMUM);
4411
- bool rec_granted_exclusive_not_gap =
4470
+ bool supremum_bit= lock_rec_get_nth_bit (lock, PAGE_HEAP_NO_SUPREMUM);
4471
+ bool rec_granted_exclusive_not_gap=
4412
4472
lock->is_rec_granted_exclusive_not_gap ();
4413
4473
if (!supremum_bit && rec_granted_exclusive_not_gap)
4414
4474
continue ;
4415
- auto &lock_hash= lock_sys.hash_get (lock->type_mode );
4416
- auto cell= lock_hash.cell_get (lock->un_member .rec_lock .page_id .fold ());
4475
+ if (UNIV_UNLIKELY (lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))
4476
+ continue ; /* SPATIAL INDEX locking is broken. */
4477
+ auto cell=
4478
+ lock_sys.rec_hash .cell_get (lock->un_member .rec_lock .page_id .fold ());
4417
4479
auto latch= lock_sys_t::hash_table::latch (cell);
4418
4480
if (latch->try_acquire ())
4419
4481
{
4420
4482
if (!rec_granted_exclusive_not_gap)
4421
4483
lock_rec_dequeue_from_page (lock, false );
4422
4484
else if (supremum_bit)
4423
- lock_rec_unlock_supremum (*cell, lock);
4485
+ lock_rec_unlock (*cell, lock, PAGE_HEAP_NO_SUPREMUM);
4486
+ else
4487
+ lock_rec_unlock_unmodified (*cell, lock, offsets, heap, mtr);
4424
4488
latch->release ();
4425
4489
}
4426
4490
else
@@ -4453,6 +4517,8 @@ static bool lock_release_on_prepare_try(trx_t *trx)
4453
4517
4454
4518
lock_sys.rd_unlock ();
4455
4519
trx->mutex_unlock ();
4520
+ if (UNIV_LIKELY_NULL (heap))
4521
+ mem_heap_free (heap);
4456
4522
return all_released;
4457
4523
}
4458
4524
/** Release non-exclusive locks on XA PREPARE, slow path: retry under an
exclusive lock_sys latch after lock_release_on_prepare_try() gave up.
Record locks on records that this transaction did not modify are also
released (see lock_rec_unlock_unmodified()).
@param trx  transaction in TRX_STATE_PREPARED */
void lock_release_on_prepare(trx_t *trx)
{
  /* Fast path: try with shared latches first. */
  if (lock_release_on_prepare_try(trx))
    return;

  mtr_t mtr;
  rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
  rec_offs *offsets= offsets_;
  mem_heap_t *heap= nullptr;

  rec_offs_init(offsets_);

  {
    /* NOTE(review): this scope bounds the exclusive lock_sys latch; it is
    released (via ~LockMutexGuard) before the heap is freed below. */
    LockMutexGuard g{SRW_LOCK_CALL};
    trx->mutex_lock();

    /* Iterate backwards so that removing the current lock is safe. */
    for (lock_t *prev, *lock= UT_LIST_GET_LAST(trx->lock.trx_locks); lock;
         lock= prev)
    {
      ut_ad(lock->trx == trx);
      prev= UT_LIST_GET_PREV(trx_locks, lock);
      if (!lock->is_table())
      {
        ut_ad(!lock->index->table->is_temporary());
        if (!lock->is_rec_granted_exclusive_not_gap())
          /* Gap or non-exclusive record locks are always safe to drop. */
          lock_rec_dequeue_from_page(lock, false);
        else if (UNIV_UNLIKELY(lock->type_mode &
                               (LOCK_PREDICATE | LOCK_PRDT_PAGE)))
          /* SPATIAL INDEX locking is broken. */;
        else
        {
          auto cell= lock_sys.rec_hash.cell_get(lock->un_member.rec_lock.
                                                page_id.fold());
          if (lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM))
            /* The supremum pseudo-record is never modified; just clear
            its bit and rebuild the waiting queue. */
            lock_rec_unlock(*cell, lock, PAGE_HEAP_NO_SUPREMUM);
          else
          {
            ut_ad(lock->trx->isolation_level > TRX_ISO_READ_COMMITTED ||
                  /* Insert-intention lock is valid for supremum for isolation
                  level > TRX_ISO_READ_COMMITTED */
                  lock->mode() == LOCK_X ||
                  !lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM));
            /* Exclusive non-gap lock: release the bits that cover
            records this transaction did not actually modify. */
            lock_rec_unlock_unmodified(*cell, lock, offsets, heap, mtr);
          }
        }
      }
      else
      {
        ut_d(dict_table_t *table= lock->un_member.tab_lock.table);
        ut_ad(!table->is_temporary());
        switch (lock->mode()) {
        case LOCK_IS:
        case LOCK_S:
          /* Shared/intention-shared table locks can go on PREPARE. */
          lock_table_dequeue(lock, false);
          break;
        case LOCK_IX:
        case LOCK_X:
          /* Exclusive table locks are kept until COMMIT/ROLLBACK. */
          ut_ad(table->id >= DICT_HDR_FIRST_ID || trx->dict_operation);
          /* fall through */
        default:
          break;
        }
      }
    }
  }

  trx->mutex_unlock();

  if (UNIV_LIKELY_NULL(heap))
    mem_heap_free(heap);
}
4516
4601
4517
4602
/* * Release locks on a table whose creation is being rolled back */
0 commit comments