Commit 27d4333c authored by Marko Mäkelä

MDEV-13935 INSERT stuck at state Unlocking tables

Refactor lock_grant(). With innodb_lock_schedule_algorithm=VATS
some callers were passing an incorrect parameter owns_trx_mutex
to lock_grant().

lock_grant_after_reset(): Refactored from lock_grant(), without
the call to lock_reset_lock_and_trx_wait().

lock_grant_have_trx_mutex(): A variant of lock_grant() where the
caller already holds the lock->trx->mutex. The normal lock_grant()
will acquire and release lock->trx->mutex.

lock_grant(): Define as a wrapper that will acquire lock->trx->mutex.
parent f93a219c
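In outline, the refactoring replaces the two-parameter lock_grant(lock, owns_trx_mutex) with three functions whose locking contract is fixed at the call site. Below is a minimal compilable sketch of that shape; the trx_t/lock_t definitions and the mutex helpers are stand-ins rather than the real InnoDB declarations, and only the three lock_grant* signatures and their call order follow this commit.

// Stand-ins for the real InnoDB types and helpers (assumption: the real
// trx->mutex is an InnoDB mutex, modelled here with std::mutex).
#include <mutex>

struct trx_t { std::mutex mutex; };
struct lock_t { trx_t* trx; };

static void trx_mutex_enter(trx_t* trx) { trx->mutex.lock(); }
static void trx_mutex_exit(trx_t* trx) { trx->mutex.unlock(); }
static void lock_reset_lock_and_trx_wait(lock_t*) { /* clears LOCK_WAIT */ }

/* Does the actual granting; the caller must hold lock->trx->mutex and
must already have called lock_reset_lock_and_trx_wait(). */
static void lock_grant_after_reset(lock_t*) { /* grant body elided */ }

/* Entry point for callers that already hold lock->trx->mutex. */
static inline void lock_grant_have_trx_mutex(lock_t* lock)
{
	lock_reset_lock_and_trx_wait(lock);
	lock_grant_after_reset(lock);
}

/* Entry point for callers that do not hold lock->trx->mutex. */
static void lock_grant(lock_t* lock)
{
	lock_reset_lock_and_trx_wait(lock);
	trx_mutex_enter(lock->trx);
	lock_grant_after_reset(lock);
	trx_mutex_exit(lock->trx);
}

int main()
{
	trx_t	trx;
	lock_t	waiting{&trx};
	lock_grant(&waiting);	// caller does not hold trx.mutex
}

With this split the error-prone boolean disappears entirely: the VATS requeueing in lock_rec_insert_by_trx_age(), which already holds the transaction mutex, now calls lock_grant_have_trx_mutex(), and every other call site uses the plain lock_grant() wrapper.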
@@ -80,15 +80,9 @@ lock_rec_has_to_wait_in_queue(
 /*==========================*/
 	const lock_t*	wait_lock);	/*!< in: waiting record lock */
 
-/*************************************************************//**
-Grants a lock to a waiting lock request and releases the waiting transaction.
-The caller must hold lock_sys->mutex. */
-static
-void
-lock_grant(
-/*=======*/
-	lock_t*	lock,	/*!< in/out: waiting lock request */
-	bool	owns_trx_mutex);	/*!< in: whether lock->trx->mutex is owned */
+/** Grant a lock to a waiting lock request and release the waiting transaction
+after lock_reset_lock_and_trx_wait() has been called. */
+static void lock_grant_after_reset(lock_t* lock);
 
 extern "C" void thd_rpl_deadlock_check(MYSQL_THD thd, MYSQL_THD other_thd);
 extern "C" int thd_need_wait_reports(const MYSQL_THD thd);
@@ -691,6 +685,12 @@ lock_reset_lock_and_trx_wait(
 	lock->type_mode &= ~LOCK_WAIT;
 }
 
+static inline void lock_grant_have_trx_mutex(lock_t* lock)
+{
+	lock_reset_lock_and_trx_wait(lock);
+	lock_grant_after_reset(lock);
+}
+
 /*********************************************************************//**
 Gets the gap flag of a record lock.
 @return LOCK_GAP or 0 */
@@ -1772,7 +1772,7 @@ lock_rec_insert_by_trx_age(
 	cell->node = in_lock;
 	in_lock->hash = node;
 	if (lock_get_wait(in_lock)) {
-		lock_grant(in_lock, true);
+		lock_grant_have_trx_mutex(in_lock);
 		return DB_SUCCESS_LOCKED_REC;
 	}
 	return DB_SUCCESS;
@@ -1786,7 +1786,7 @@
 	in_lock->hash = next;
 
 	if (lock_get_wait(in_lock) && !lock_rec_has_to_wait_in_queue(in_lock)) {
-		lock_grant(in_lock, true);
+		lock_grant_have_trx_mutex(in_lock);
 		if (cell->node != in_lock) {
 			// Move it to the front of the queue
 			node->hash = in_lock->hash;
@@ -2380,24 +2380,12 @@ lock_rec_has_to_wait_in_queue(
 	return(NULL);
 }
 
-/*************************************************************//**
-Grants a lock to a waiting lock request and releases the waiting transaction.
-The caller must hold lock_sys->mutex but not lock->trx->mutex. */
-static
-void
-lock_grant(
-/*=======*/
-	lock_t*	lock,	/*!< in/out: waiting lock request */
-	bool	owns_trx_mutex)	/*!< in: whether lock->trx->mutex is owned */
+/** Grant a lock to a waiting lock request and release the waiting transaction
+after lock_reset_lock_and_trx_wait() has been called. */
+static void lock_grant_after_reset(lock_t* lock)
 {
 	ut_ad(lock_mutex_own());
-	ut_ad(trx_mutex_own(lock->trx) == owns_trx_mutex);
-
-	lock_reset_lock_and_trx_wait(lock);
-
-	if (!owns_trx_mutex) {
-		trx_mutex_enter(lock->trx);
-	}
+	ut_ad(trx_mutex_own(lock->trx));
 
 	if (lock_get_mode(lock) == LOCK_AUTO_INC) {
 		dict_table_t*	table = lock->un_member.tab_lock.table;
@@ -2429,10 +2417,15 @@ lock_grant(
 			lock_wait_release_thread_if_suspended(thr);
 		}
 	}
+}
 
-	if (!owns_trx_mutex) {
-		trx_mutex_exit(lock->trx);
-	}
+/** Grant a lock to a waiting lock request and release the waiting transaction. */
+static void lock_grant(lock_t* lock)
+{
+	lock_reset_lock_and_trx_wait(lock);
+	trx_mutex_enter(lock->trx);
+	lock_grant_after_reset(lock);
+	trx_mutex_exit(lock->trx);
 }
 
 /*************************************************************//**
@@ -2472,17 +2465,13 @@ lock_rec_cancel(
 static
 void
-lock_grant_and_move_on_page(
-	hash_table_t*	lock_hash,
-	ulint		space,
-	ulint		page_no)
+lock_grant_and_move_on_page(ulint rec_fold, ulint space, ulint page_no)
 {
 	lock_t*		lock;
-	lock_t*		previous;
-	ulint		rec_fold = lock_rec_fold(space, page_no);
-
-	previous = (lock_t *) hash_get_nth_cell(lock_hash,
-			hash_calc_hash(rec_fold, lock_hash))->node;
+	lock_t*		previous = static_cast<lock_t*>(
+		hash_get_nth_cell(lock_sys->rec_hash,
+				  hash_calc_hash(rec_fold, lock_sys->rec_hash))
+		->node);
 	if (previous == NULL) {
 		return;
 	}
@@ -2502,14 +2491,13 @@ lock_grant_and_move_on_page(
 	ut_ad(previous->hash == lock || previous == lock);
 	/* Grant locks if there are no conflicting locks ahead.
 	Move granted locks to the head of the list. */
-	for (;lock != NULL;) {
+	while (lock) {
 		/* If the lock is a wait lock on this page, and it does not need to wait. */
-		if ((lock->un_member.rec_lock.space == space)
-			&& (lock->un_member.rec_lock.page_no == page_no)
-			&& lock_get_wait(lock)
-			&& !lock_rec_has_to_wait_in_queue(lock)) {
-
-			lock_grant(lock, false);
+		if (lock_get_wait(lock)
+		    && lock->un_member.rec_lock.space == space
+		    && lock->un_member.rec_lock.page_no == page_no
+		    && !lock_rec_has_to_wait_in_queue(lock)) {
+			lock_grant(lock);
 
 			if (previous != NULL) {
 				/* Move the lock to the head of the list. */
@@ -2528,33 +2516,20 @@ lock_grant_and_move_on_page(
 	}
 }
 
-/*************************************************************//**
-Removes a record lock request, waiting or granted, from the queue and
-grants locks to other transactions in the queue if they now are entitled
-to a lock. NOTE: all record locks contained in in_lock are removed. */
-static
-void
-lock_rec_dequeue_from_page(
-/*=======================*/
-	lock_t*	in_lock)	/*!< in: record lock object: all
-				record locks which are contained in
-				this lock object are removed;
-				transactions waiting behind will
-				get their lock requests granted,
-				if they are now qualified to it */
+/** Remove a record lock request, waiting or granted, from the queue and
+grant locks to other transactions in the queue if they now are entitled
+to a lock. NOTE: all record locks contained in in_lock are removed.
+@param[in,out]	in_lock		record lock */
+static void lock_rec_dequeue_from_page(lock_t* in_lock)
 {
 	ulint		space;
 	ulint		page_no;
-	lock_t*		lock;
-	trx_lock_t*	trx_lock;
 	hash_table_t*	lock_hash;
 
 	ut_ad(lock_mutex_own());
 	ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
 	/* We may or may not be holding in_lock->trx->mutex here. */
 
-	trx_lock = &in_lock->trx->lock;
-
 	space = in_lock->un_member.rec_lock.space;
 	page_no = in_lock->un_member.rec_lock.page_no;
@@ -2562,38 +2537,36 @@ lock_rec_dequeue_from_page(
 	lock_hash = lock_hash_get(in_lock->type_mode);
 
-	HASH_DELETE(lock_t, hash, lock_hash,
-		    lock_rec_fold(space, page_no), in_lock);
+	ulint rec_fold = lock_rec_fold(space, page_no);
 
-	UT_LIST_REMOVE(trx_lock->trx_locks, in_lock);
+	HASH_DELETE(lock_t, hash, lock_hash, rec_fold, in_lock);
+	UT_LIST_REMOVE(in_lock->trx->lock.trx_locks, in_lock);
 
 	MONITOR_INC(MONITOR_RECLOCK_REMOVED);
 	MONITOR_DEC(MONITOR_NUM_RECLOCK);
 
 	if (innodb_lock_schedule_algorithm
-	    == INNODB_LOCK_SCHEDULE_ALGORITHM_FCFS ||
-	    thd_is_replication_slave_thread(in_lock->trx->mysql_thd)) {
+	    == INNODB_LOCK_SCHEDULE_ALGORITHM_FCFS
+	    || lock_hash != lock_sys->rec_hash
+	    || thd_is_replication_slave_thread(in_lock->trx->mysql_thd)) {
 
 		/* Check if waiting locks in the queue can now be granted:
 		grant locks if there are no conflicting locks ahead. Stop at
 		the first X lock that is waiting or has been granted. */
 
-		for (lock = lock_rec_get_first_on_page_addr(lock_hash, space,
-							    page_no);
+		for (lock_t* lock = lock_rec_get_first_on_page_addr(
+			     lock_hash, space, page_no);
 		     lock != NULL;
 		     lock = lock_rec_get_next_on_page(lock)) {
 
 			if (lock_get_wait(lock)
 			    && !lock_rec_has_to_wait_in_queue(lock)) {
 
 				/* Grant the lock */
 				ut_ad(lock->trx != in_lock->trx);
-				lock_grant(lock, false);
+				lock_grant(lock);
 			}
 		}
 	} else {
-		lock_grant_and_move_on_page(lock_hash, space, page_no);
+		lock_grant_and_move_on_page(rec_fold, space, page_no);
 	}
 }
@@ -4322,7 +4295,7 @@ lock_table_dequeue(
 			/* Grant the lock */
 			ut_ad(in_lock->trx != lock->trx);
-			lock_grant(lock, false);
+			lock_grant(lock);
 		}
 	}
 }
@@ -4424,7 +4397,7 @@ lock_grant_and_move_on_rec(
 		    && lock_get_wait(lock)
 		    && !lock_rec_has_to_wait_in_queue(lock)) {
-			lock_grant(lock, false);
+			lock_grant(lock);
 
 			if (previous != NULL) {
 				/* Move the lock to the head of the list. */
@@ -4516,7 +4489,7 @@ lock_rec_unlock(
 			/* Grant the lock */
 			ut_ad(trx != lock->trx);
-			lock_grant(lock, false);
+			lock_grant(lock);
 		}
 	}
 	} else {
...
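One design point worth spelling out from the lock_rec_dequeue_from_page() hunk above: VATS reordering is now applied only to the regular record-lock hash. innodb_lock_schedule_algorithm=FCFS, a hash table other than lock_sys->rec_hash, or a replication slave thread all fall back to the first-come-first-served scan. A self-contained toy sketch of that dispatch follows; everything in it is a stand-in for the real globals and predicates, and only the shape of the condition mirrors the diff.

#include <cstdio>

enum sched_algo { FCFS, VATS };

struct stub_hash { int id; };	// stand-in for hash_table_t

// Stand-ins for the real globals and predicates.
static sched_algo innodb_lock_schedule_algorithm = VATS;
static stub_hash rec_hash{0};	// plays the role of lock_sys->rec_hash
static bool is_replication_slave() { return false; }

static void dequeue_dispatch(const stub_hash* lock_hash)
{
	if (innodb_lock_schedule_algorithm == FCFS
	    || lock_hash != &rec_hash
	    || is_replication_slave()) {
		std::puts("FCFS scan: grant waiters with no conflict ahead");
	} else {
		std::puts("VATS: lock_grant_and_move_on_page()");
	}
}

int main()
{
	stub_hash prdt_hash{1};		// e.g. a predicate-lock hash
	dequeue_dispatch(&rec_hash);	// takes the VATS path
	dequeue_dispatch(&prdt_hash);	// falls back to the FCFS scan
}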