Commit 27d23c02 authored by Jan Lindström

Merged percona-server-5.5.38-35.2.

parents 37ba4f37 a9e8b577
......@@ -4428,7 +4428,7 @@ btr_blob_free(
&& buf_block_get_space(block) == space
&& buf_block_get_page_no(block) == page_no) {
if (!buf_LRU_free_block(&block->page, (void *)&block->mutex, all, &have_LRU_mutex)
if (!buf_LRU_free_block(&block->page, all, TRUE)
&& all && block->page.zip.data
/* Now, buf_LRU_free_block() may release mutex temporarily */
&& buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
......@@ -4437,7 +4437,7 @@ btr_blob_free(
/* Attempt to deallocate the uncompressed page
if the whole block cannot be deallocated. */
buf_LRU_free_block(&block->page, (void *)&block->mutex, FALSE, &have_LRU_mutex);
buf_LRU_free_block(&block->page, FALSE, TRUE);
}
}
......
......@@ -1600,6 +1600,12 @@ buf_pool_watch_is_sentinel(
buf_pool_t* buf_pool, /*!< buffer pool instance */
const buf_page_t* bpage) /*!< in: block */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_SHARED)
|| rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX)
|| mutex_own(buf_page_get_mutex(bpage)));
#endif
ut_ad(buf_page_in_file(bpage));
if (bpage < &buf_pool->watch[0]
......@@ -1976,7 +1982,6 @@ buf_page_get_zip(
ib_uint64_t start_time;
ib_uint64_t finish_time;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
ibool have_LRU_mutex = FALSE;
if (UNIV_UNLIKELY(innobase_get_slow_log())) {
trx = innobase_get_trx();
......@@ -2048,40 +2053,33 @@ err_exit:
mutex_exit(block_mutex);
/* get LRU_list_mutex for buf_LRU_free_block() */
if (!have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
have_LRU_mutex = TRUE;
}
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
mutex_enter(&buf_pool->LRU_list_mutex);
mutex_enter(block_mutex);
if (UNIV_UNLIKELY(bpage->space != space
if (UNIV_UNLIKELY((buf_page_get_state(bpage)
!= BUF_BLOCK_FILE_PAGE)
|| bpage->space != space
|| bpage->offset != offset
|| !bpage->in_LRU_list
|| !bpage->zip.data)) {
/* someone should interrupt, retry */
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
mutex_exit(block_mutex);
goto lookup;
}
/* Discard the uncompressed page frame if possible. */
if (buf_LRU_free_block(bpage, (void *)block_mutex, FALSE, &have_LRU_mutex)) {
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
if (buf_LRU_free_block(bpage, FALSE, TRUE)) {
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
mutex_exit(block_mutex);
goto lookup;
}
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
buf_block_buf_fix_inc((buf_block_t*) bpage,
__FILE__, __LINE__);
......@@ -2464,7 +2462,6 @@ buf_page_get_gen(
ib_uint64_t start_time;
ib_uint64_t finish_time;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
ibool have_LRU_mutex = FALSE;
ut_ad(mtr);
ut_ad(mtr->state == MTR_ACTIVE);
......@@ -2566,21 +2563,9 @@ loop2:
|| mode == BUF_PEEK_IF_IN_POOL
|| mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
return(NULL);
}
/* We should not hold LRU mutex below when trying
to read the page */
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
if (buf_read_page(space, zip_size, offset, trx)) {
buf_read_ahead_random(space, zip_size, offset,
ibuf_inside(mtr), trx);
......@@ -2633,11 +2618,6 @@ null_exit:
//buf_pool_mutex_exit(buf_pool);
mutex_exit(block_mutex);
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
return(NULL);
}
......@@ -2646,11 +2626,6 @@ null_exit:
mutex_exit(block_mutex);
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
return(NULL);
}
......@@ -2711,12 +2686,8 @@ wait_until_unfixed:
ut_a(block);
block_mutex = &block->mutex;
//buf_pool_mutex_enter(buf_pool);
if (!have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
have_LRU_mutex = TRUE;
}
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
mutex_enter(&buf_pool->LRU_list_mutex);
rw_lock_x_lock(&buf_pool->page_hash_latch);
mutex_enter(&block->mutex);
mutex_enter(&buf_pool->zip_mutex);
......@@ -2740,10 +2711,8 @@ wait_until_unfixed:
rw_lock_x_unlock(&buf_pool->page_hash_latch);
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
goto wait_until_unfixed;
}
......@@ -2782,10 +2751,8 @@ wait_until_unfixed:
/* Insert at the front of unzip_LRU list */
buf_unzip_LRU_add_block(block, FALSE);
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
block->page.buf_fix_count = 1;
buf_block_set_io_fix(block, BUF_IO_READ);
......@@ -2859,12 +2826,7 @@ wait_until_unfixed:
insert buffer (change buffer) as much as possible. */
ulint page_no = buf_block_get_page_no(block);
if (!have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
have_LRU_mutex = TRUE;
}
if (buf_LRU_free_block(&block->page, (void *)block_mutex, TRUE, &have_LRU_mutex)) {
if (buf_LRU_free_block(&block->page, TRUE, FALSE)) {
mutex_exit(block_mutex);
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
/* Set the watch, as it would have
......@@ -2889,11 +2851,6 @@ wait_until_unfixed:
"innodb_change_buffering_debug evict %u %u\n",
(unsigned) space, (unsigned) offset);
if (have_LRU_mutex){
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
return(NULL);
} else if (UNIV_UNLIKELY(buf_block_get_state(block)
!= BUF_BLOCK_FILE_PAGE
......@@ -2910,10 +2867,7 @@ wait_until_unfixed:
} else {
/* We should not hold LRU mutex below when trying
to flush page */
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
if (buf_flush_page_try(buf_pool, block)) {
fprintf(stderr,
......@@ -3022,11 +2976,6 @@ wait_until_unfixed:
_increment_page_get_statistics(block, trx);
}
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
have_LRU_mutex = FALSE;
}
return(block);
}
......
......@@ -1606,7 +1606,11 @@ buf_flush_page_and_try_neighbors(
}
ut_a(buf_page_in_file(bpage)
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
|| (buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH
#ifdef UNIV_DEBUG
&& !mutex_own(&buf_pool->LRU_list_mutex)
#endif
));
if (buf_flush_ready_for_flush(bpage, flush_type)) {
ulint space;
......
......@@ -177,24 +177,15 @@ UNIV_INLINE
ibool
buf_LRU_evict_from_unzip_LRU(
/*=========================*/
buf_pool_t* buf_pool,
ibool* have_LRU_mutex)
buf_pool_t* buf_pool)
{
ulint io_avg;
ulint unzip_avg;
//ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
if (!*have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = TRUE;
}
/* If the unzip_LRU list is empty, we can only use the LRU. */
if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(FALSE);
}
......@@ -203,26 +194,14 @@ buf_LRU_evict_from_unzip_LRU(
decompressed pages in the buffer pool. */
if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
<= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(FALSE);
}
/* If eviction hasn't started yet, we assume by default
that a workload is disk bound. */
if (buf_pool->freed_page_clock == 0) {
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(TRUE);
}
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
/* Calculate the average over past intervals, and add the values
of the current interval. */
......@@ -626,6 +605,8 @@ buf_flush_or_remove_pages(
ibool all_freed = TRUE;
ibool must_restart = FALSE;
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
buf_flush_list_mutex_enter(buf_pool);
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
......@@ -946,20 +927,18 @@ ibool
buf_LRU_free_from_unzip_LRU_list(
/*=============================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint n_iterations, /*!< in: how many times this has
ulint n_iterations) /*!< in: how many times this has
been called repeatedly without
result: a high value means that
we should search farther; we will
search n_iterations / 5 of the
unzip_LRU list, or nothing if
n_iterations >= 5 */
ibool* have_LRU_mutex)
{
buf_block_t* block;
ulint distance;
ibool taken_LRU_mutex = FALSE;
//ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
/* Theoretically it should be much easier to find a victim
from unzip_LRU as we can choose even a dirty block (as we'll
......@@ -969,7 +948,7 @@ buf_LRU_free_from_unzip_LRU_list(
if we have done five iterations so far. */
if (UNIV_UNLIKELY(n_iterations >= 5)
|| !buf_LRU_evict_from_unzip_LRU(buf_pool, have_LRU_mutex)) {
|| !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
return(FALSE);
}
......@@ -977,54 +956,25 @@ buf_LRU_free_from_unzip_LRU_list(
distance = 100 + (n_iterations
* UT_LIST_GET_LEN(buf_pool->unzip_LRU)) / 5;
if (!*have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
taken_LRU_mutex = TRUE;
*have_LRU_mutex = TRUE;
}
restart:
for (block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
UNIV_LIKELY(block != NULL) && UNIV_LIKELY(distance > 0);
block = UT_LIST_GET_PREV(unzip_LRU, block), distance--) {
ibool freed;
mutex_enter(&block->mutex);
if (!block->in_unzip_LRU_list || !block->page.in_LRU_list
|| buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
mutex_exit(&block->mutex);
goto restart;
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_a(block->page.buf_pool_index < srv_buf_pool_instances);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->in_unzip_LRU_list);
ut_ad(block->page.in_LRU_list);
freed = buf_LRU_free_block(&block->page, (void *)&block->mutex, FALSE, have_LRU_mutex);
mutex_enter(&block->mutex);
freed = buf_LRU_free_block(&block->page, FALSE, TRUE);
mutex_exit(&block->mutex);
if (freed) {
if (taken_LRU_mutex && *have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(TRUE);
} else if (!*have_LRU_mutex) {
*have_LRU_mutex = TRUE;
mutex_enter(&buf_pool->LRU_list_mutex);
taken_LRU_mutex = TRUE;
}
}
if (taken_LRU_mutex && *have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(FALSE);
}
......@@ -1036,55 +986,37 @@ ibool
buf_LRU_free_from_common_LRU_list(
/*==============================*/
buf_pool_t* buf_pool,
ulint n_iterations,
ulint n_iterations)
/*!< in: how many times this has been called
repeatedly without result: a high value means
that we should search farther; if
n_iterations < 10, then we search
n_iterations / 10 * buf_pool->curr_size
pages from the end of the LRU list */
ibool* have_LRU_mutex)
{
buf_page_t* bpage;
ulint distance;
ibool taken_LRU_mutex = FALSE;
//ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
if (!*have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
taken_LRU_mutex = TRUE;
*have_LRU_mutex = TRUE;
}
restart:
for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
UNIV_LIKELY(bpage != NULL) && UNIV_LIKELY(distance > 0);
bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
ibool freed;
unsigned accessed;
mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
if (!block_mutex) {
goto restart;
}
if (!bpage->in_LRU_list
|| !buf_page_in_file(bpage)) {
mutex_exit(block_mutex);
goto restart;
}
mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
ut_a(bpage->buf_pool_index < srv_buf_pool_instances);
mutex_enter(block_mutex);
accessed = buf_page_is_accessed(bpage);
freed = buf_LRU_free_block(bpage, (void *)block_mutex, TRUE, have_LRU_mutex);
freed = buf_LRU_free_block(bpage, TRUE, TRUE);
mutex_exit(block_mutex);
if (freed) {
......@@ -1095,24 +1027,10 @@ restart:
++buf_pool->stat.n_ra_pages_evicted;
}
if (taken_LRU_mutex && *have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(TRUE);
} else if (!*have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
taken_LRU_mutex = TRUE;
*have_LRU_mutex = TRUE;
}
}
if (taken_LRU_mutex && *have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
return(FALSE);
}
......@@ -1136,23 +1054,19 @@ buf_LRU_search_and_free_block(
n_iterations / 5 of the unzip_LRU list. */
{
ibool freed = FALSE;
ibool have_LRU_mutex = FALSE;
//buf_pool_mutex_enter(buf_pool);
if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)) {
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
mutex_enter(&buf_pool->LRU_list_mutex);
have_LRU_mutex = TRUE;
}
mutex_enter(&buf_pool->LRU_list_mutex);
freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations, &have_LRU_mutex);
freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
if (!freed) {
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
freed = buf_LRU_free_from_common_LRU_list(
buf_pool, n_iterations, &have_LRU_mutex);
buf_pool, n_iterations);
}
buf_pool_mutex_enter(buf_pool);
if (!freed) {
buf_pool->LRU_flush_ended = 0;
} else if (buf_pool->LRU_flush_ended > 0) {
......@@ -1160,8 +1074,8 @@ buf_LRU_search_and_free_block(
}
buf_pool_mutex_exit(buf_pool);
if (have_LRU_mutex)
mutex_exit(&buf_pool->LRU_list_mutex);
mutex_exit(&buf_pool->LRU_list_mutex);
return(freed);
}
......@@ -1893,17 +1807,16 @@ ibool
buf_LRU_free_block(
/*===============*/
buf_page_t* bpage, /*!< in: block to be freed */
void* block_mutex, /*!< in: block mutex or NULL */
ibool zip, /*!< in: TRUE if should remove also the
compressed page of an uncompressed page */
ibool* have_LRU_mutex)
ibool have_LRU_mutex)
{
buf_page_t* b = NULL;
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
//mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_t* block_mutex = buf_page_get_mutex(bpage);
//ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own((mutex_t*)block_mutex));
ut_ad(mutex_own(block_mutex));
ut_ad(buf_page_in_file(bpage));
//ut_ad(bpage->in_LRU_list);
ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
......@@ -1960,15 +1873,12 @@ alloc:
#endif /* UNIV_DEBUG */
/* not to break latch order, must re-enter block_mutex */
mutex_exit((mutex_t*)block_mutex);
if (!*have_LRU_mutex) {
mutex_enter(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = TRUE;
}
mutex_exit(block_mutex);
if (!have_LRU_mutex)
mutex_enter(&buf_pool->LRU_list_mutex); /* optimistic */
rw_lock_x_lock(&buf_pool->page_hash_latch);
mutex_enter((mutex_t*)block_mutex);
mutex_enter(block_mutex);
/* recheck states of block */
if (!bpage->in_LRU_list || block_mutex != buf_page_get_mutex(bpage)
......@@ -1977,12 +1887,8 @@ not_freed:
if (b) {
buf_page_free_descriptor(b);
}
if (*have_LRU_mutex) {
if (!have_LRU_mutex)
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
rw_lock_x_unlock(&buf_pool->page_hash_latch);
return(FALSE);
} else if (zip || !bpage->zip.data) {
......@@ -2117,14 +2023,9 @@ not_freed:
}
//buf_pool_mutex_exit(buf_pool);
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
mutex_exit(&buf_pool->LRU_list_mutex);
rw_lock_x_unlock(&buf_pool->page_hash_latch);
mutex_exit((mutex_t*)block_mutex);
mutex_exit(block_mutex);
/* Remove possible adaptive hash index on the page.
The page was declared uninitialized by
......@@ -2156,13 +2057,9 @@ not_freed:
}
//buf_pool_mutex_enter(buf_pool);
if (!*have_LRU_mutex) {
if (have_LRU_mutex)
mutex_enter(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = TRUE;
}
mutex_enter((mutex_t*)block_mutex);
mutex_enter(block_mutex);
if (b) {
mutex_enter(&buf_pool->zip_mutex);
......@@ -2171,23 +2068,15 @@ not_freed:
}
buf_LRU_block_free_hashed_page((buf_block_t*) bpage, FALSE);
if (*have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
} else {
/* The block_mutex should have been released by
buf_LRU_block_remove_hashed_page() when it returns
BUF_BLOCK_ZIP_FREE. */
ut_ad((mutex_t*)block_mutex == &buf_pool->zip_mutex);
mutex_enter((mutex_t*)block_mutex);
ut_ad(block_mutex == &buf_pool->zip_mutex);
mutex_enter(block_mutex);
if (*have_LRU_mutex) {
if (!have_LRU_mutex)
mutex_exit(&buf_pool->LRU_list_mutex);
*have_LRU_mutex = FALSE;
}
rw_lock_x_unlock(&buf_pool->page_hash_latch);
}
......
......@@ -340,9 +340,13 @@ buf_read_ahead_random(
return(0);
}
buf_pool_mutex_exit(buf_pool);
/* Count how many blocks in the area have been recently accessed,
that is, reside near the start of the LRU list. */
rw_lock_s_lock(&buf_pool->page_hash_latch);
for (i = low; i < high; i++) {
const buf_page_t* bpage =
buf_page_hash_get(buf_pool, space, i);
......@@ -356,13 +360,13 @@ buf_read_ahead_random(
if (recent_blocks
>= BUF_READ_AHEAD_RANDOM_THRESHOLD(buf_pool)) {
buf_pool_mutex_exit(buf_pool);
rw_lock_s_unlock(&buf_pool->page_hash_latch);
goto read_ahead;
}
}
}
buf_pool_mutex_exit(buf_pool);
rw_lock_s_unlock(&buf_pool->page_hash_latch);
/* Do nothing */
return(0);
......
......@@ -12693,9 +12693,9 @@ innodb_buffer_pool_evict_update(
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool = &buf_pool_ptr[i];
ibool have_LRU_mutex = TRUE;
//buf_pool_mutex_enter(buf_pool);
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
mutex_enter(&buf_pool->LRU_list_mutex);
for (buf_block_t* block = UT_LIST_GET_LAST(
......@@ -12711,15 +12711,15 @@ innodb_buffer_pool_evict_update(
ut_ad(block->page.in_LRU_list);
mutex_enter(&block->mutex);
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
buf_LRU_free_block(&block->page,
(void *)&block->mutex,FALSE, &have_LRU_mutex);
FALSE, TRUE);
mutex_exit(&block->mutex);
block = prev_block;
}
if (have_LRU_mutex) {
mutex_exit(&buf_pool->LRU_list_mutex);
}
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
mutex_exit(&buf_pool->LRU_list_mutex);
//buf_pool_mutex_exit(buf_pool);
}
}
......
......@@ -689,6 +689,11 @@ buf_page_get_block(
/*===============*/
buf_page_t* bpage) /*!< in: control block, or NULL */
{
#ifdef UNIV_SYNC_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_SHARED)
|| rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
#endif
if (UNIV_LIKELY(bpage != NULL)) {
ut_ad(buf_page_in_file(bpage));
......
......@@ -99,10 +99,9 @@ ibool
buf_LRU_free_block(
/*===============*/
buf_page_t* bpage, /*!< in: block to be freed */
void* block_mutex, /*!< in: block mutex or NULL */
ibool zip, /*!< in: TRUE if should remove also the
compressed page of an uncompressed page */
ibool* have_LRU_mutex)
ibool have_LRU_mutex)
__attribute__((nonnull));
/******************************************************************//**
Try to free a replaceable block.
......
......@@ -64,10 +64,10 @@ component, i.e. we show M.N.P as M.N */
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
#ifndef PERCONA_INNODB_VERSION
#define PERCONA_INNODB_VERSION 35.0
#define PERCONA_INNODB_VERSION 35.2
#endif
#define INNODB_VERSION_STR "5.5.37-MariaDB-" IB_TO_STR(PERCONA_INNODB_VERSION)
#define INNODB_VERSION_STR "5.5.38-MariaDB-" IB_TO_STR(PERCONA_INNODB_VERSION)
#define REFMAN "http://dev.mysql.com/doc/refman/" \
IB_TO_STR(MYSQL_MAJOR_VERSION) "." \
......
......@@ -143,6 +143,7 @@ row_ins_alloc_sys_fields(
const dict_col_t* col;
dfield_t* dfield;
byte* ptr;
uint len;
row = node->row;
table = node->table;
......@@ -151,35 +152,37 @@ row_ins_alloc_sys_fields(
ut_ad(row && table && heap);
ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table));
/* 1. Allocate buffer for row id */
/* allocate buffer to hold the needed system created hidden columns. */
len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
ptr = mem_heap_zalloc(heap, len);
/* 1. Populate row-id */
col = dict_table_get_sys_col(table, DATA_ROW_ID);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_zalloc(heap, DATA_ROW_ID_LEN);
dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN);
node->row_id_buf = ptr;
/* 3. Allocate buffer for trx id */
ptr += DATA_ROW_ID_LEN;
/* 2. Populate trx id */
col = dict_table_get_sys_col(table, DATA_TRX_ID);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_zalloc(heap, DATA_TRX_ID_LEN);
dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN);
node->trx_id_buf = ptr;
/* 4. Allocate buffer for roll ptr */
ptr += DATA_TRX_ID_LEN;
/* 3. Populate roll ptr */
col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN);
dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
}
......