Commit 7f3950a2 authored by Jan Lindström

Moved mt-flush code to buf0mtflu.[cc|h] and cleaned it up. This is for InnoDB.
parent 921d87d4
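For orientation before the file-by-file diff: the core of this change is a dispatch in the flush entry points, which now hand work to the multi-threaded flusher whenever it has been initialized. A minimal standalone sketch of that pattern follows; the single_threaded_flush_list() stub and the hard-coded flag are illustrative stand-ins, not code from this patch.

// Standalone sketch of the dispatch introduced by this commit (simplified
// signatures, stub bodies -- not the real InnoDB functions).
#include <cstdio>

typedef unsigned long ulint;
typedef unsigned long long lsn_t;

// Stand-in for buf_mtflu_init_done(): real code checks the work queue state.
static bool mtflu_initialized = true;

// Stand-in for buf_mtflu_flush_list(): real code posts work items to the
// flush worker threads, one per buffer pool instance.
static bool buf_mtflu_flush_list(ulint min_n, lsn_t lsn_limit, ulint* n_processed)
{
	(void)lsn_limit;
	if (n_processed) { *n_processed = min_n; }
	return true;
}

// Hypothetical stand-in for the original single-threaded flush path.
static bool single_threaded_flush_list(ulint min_n, lsn_t lsn_limit, ulint* n_processed)
{
	(void)lsn_limit;
	if (n_processed) { *n_processed = min_n; }
	return true;
}

// The pattern used by buf_flush_list() and buf_flush_LRU_tail() in this patch:
// try the multi-threaded path first, fall back to the old path otherwise.
static bool buf_flush_list(ulint min_n, lsn_t lsn_limit, ulint* n_processed)
{
	if (mtflu_initialized) {
		return buf_mtflu_flush_list(min_n, lsn_limit, n_processed);
	}
	return single_threaded_flush_list(min_n, lsn_limit, n_processed);
}

int main()
{
	ulint n = 0;
	buf_flush_list(100, 0, &n);
	std::printf("flushed %lu pages\n", n);
	return 0;
}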
@@ -278,8 +278,7 @@ SET(INNOBASE_SOURCES
buf/buf0flu.cc
buf/buf0lru.cc
buf/buf0rea.cc
# TODO: JAN uncomment buf/buf0mtflu.cc
# buf/buf0mtflu.cc
data/data0data.cc
data/data0type.cc
dict/dict0boot.cc
......
@@ -32,6 +32,7 @@ Created 11/11/1995 Heikki Tuuri
#endif
#include "buf0buf.h"
#include "buf0mtflu.h"
#include "buf0checksum.h"
#include "srv0start.h"
#include "srv0srv.h"
@@ -1680,7 +1681,6 @@ pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued */
//static
ulint
buf_flush_batch(
/*============*/
@@ -1737,7 +1737,6 @@ buf_flush_batch(
/******************************************************************//**
Gather the aggregated stats for both flush list and LRU list flushing */
//static
void
buf_flush_common(
/*=============*/
@@ -1762,7 +1761,6 @@ buf_flush_common(
/******************************************************************//**
Start a buffer flush batch for LRU or flush list */
//static
ibool
buf_flush_start(
/*============*/
@@ -1791,7 +1789,6 @@ buf_flush_start(
/******************************************************************//**
End a buffer flush batch for LRU or flush list */
//static
void
buf_flush_end(
/*==========*/
@@ -1846,50 +1843,6 @@ buf_flush_wait_batch_end(
}
}
/* JAN: TODO: */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list and also
puts replaceable clean pages from the end of the LRU list to the free
list.
NOTE: The calling thread is not allowed to own any latches on pages!
@return true if a batch was queued successfully. false if another batch
of same type was already running. */
static
bool
pgcomp_buf_flush_LRU(
/*==========*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
ulint* n_processed) /*!< out: the number of pages
which were processed is passed
back to caller. Ignored if NULL */
{
ulint page_count;
if (n_processed) {
*n_processed = 0;
}
if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
return(false);
}
page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);
buf_flush_end(buf_pool, BUF_FLUSH_LRU);
buf_flush_common(BUF_FLUSH_LRU, page_count);
if (n_processed) {
*n_processed = page_count;
}
return(true);
}
/* JAN: TODO: END: */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list and also
puts replaceable clean pages from the end of the LRU list to the free
@@ -1932,125 +1885,6 @@ buf_flush_LRU(
return(true);
}
/* JAN: TODO: */
/*******************************************************************//**/
extern int is_pgcomp_wrk_init_done(void);
extern int pgcomp_flush_work_items(
int buf_pool_inst,
int *pages_flushed,
enum buf_flush flush_type,
int min_n,
lsn_t lsn_limit);
#define MT_COMP_WATER_MARK 50
#ifdef UNIV_DEBUG
#include <time.h>
int timediff(struct timeval *g_time, struct timeval *s_time, struct timeval *d_time)
{
if (g_time->tv_usec < s_time->tv_usec)
{
int nsec = (s_time->tv_usec - g_time->tv_usec) / 1000000 + 1;
s_time->tv_usec -= 1000000 * nsec;
s_time->tv_sec += nsec;
}
if (g_time->tv_usec - s_time->tv_usec > 1000000)
{
int nsec = (s_time->tv_usec - g_time->tv_usec) / 1000000;
s_time->tv_usec += 1000000 * nsec;
s_time->tv_sec -= nsec;
}
d_time->tv_sec = g_time->tv_sec - s_time->tv_sec;
d_time->tv_usec = g_time->tv_usec - s_time->tv_usec;
return 0;
}
#endif
static os_fast_mutex_t pgcomp_mtx;
void pgcomp_init(void)
{
os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &pgcomp_mtx);
}
void pgcomp_deinit(void)
{
os_fast_mutex_free(&pgcomp_mtx);
}
/*******************************************************************//**
Multi-threaded version of buf_flush_list
*/
UNIV_INTERN
bool
pgcomp_buf_flush_list(
/*==================*/
ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all
blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
ulint* n_processed) /*!< out: the number of pages
which were processed is passed
back to caller. Ignored if NULL */
{
ulint i;
bool success = true;
#ifdef UNIV_DEBUG
struct timeval p_start_time, p_end_time, d_time;
#endif
int cnt_flush[MTFLUSH_MAX_WORKER];
if (n_processed) {
*n_processed = 0;
}
if (min_n != ULINT_MAX) {
/* Ensure that flushing is spread evenly amongst the
buffer pool instances. When min_n is ULINT_MAX
we need to flush everything up to the lsn limit
so no limit here. */
min_n = (min_n + srv_buf_pool_instances - 1)
/ srv_buf_pool_instances;
}
#ifdef UNIV_DEBUG
gettimeofday(&p_start_time, 0x0);
#endif
os_fast_mutex_lock(&pgcomp_mtx);
pgcomp_flush_work_items(srv_buf_pool_instances,
cnt_flush, BUF_FLUSH_LIST,
min_n, lsn_limit);
os_fast_mutex_unlock(&pgcomp_mtx);
for (i = 0; i < srv_buf_pool_instances; i++) {
if (n_processed) {
*n_processed += cnt_flush[i];
}
if (cnt_flush[i]) {
MONITOR_INC_VALUE_CUMULATIVE(
MONITOR_FLUSH_BATCH_TOTAL_PAGE,
MONITOR_FLUSH_BATCH_COUNT,
MONITOR_FLUSH_BATCH_PAGES,
cnt_flush[i]);
}
}
#ifdef UNIV_DEBUG
gettimeofday(&p_end_time, 0x0);
timediff(&p_end_time, &p_start_time, &d_time);
fprintf(stderr, "%s: [1] [*n_processed: (min:%lu)%lu %llu usec]\n",
__FUNCTION__, (min_n * srv_buf_pool_instances), *n_processed,
(unsigned long long)(d_time.tv_usec+(d_time.tv_sec*1000000)));
#endif
return(success);
}
/* JAN: TODO: END: */
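One detail worth noting in the removed pgcomp_buf_flush_list() above: when min_n is not ULINT_MAX, it is split evenly across buffer pool instances with a ceiling division. A standalone arithmetic sketch with assumed values (not InnoDB code):

#include <cstdio>

int main()
{
	unsigned long min_n = 1000;                 /* assumed: pages requested */
	unsigned long srv_buf_pool_instances = 8;   /* assumed: pool instances  */

	/* Ceiling division so the per-instance quota never undershoots:
	   1000 pages over 8 instances -> 125 each; 1001 -> 126 each. */
	unsigned long per_instance =
		(min_n + srv_buf_pool_instances - 1) / srv_buf_pool_instances;

	std::printf("%lu pages total -> %lu per instance\n",
		    min_n, per_instance);
	return 0;
}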
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
@@ -2078,11 +1912,9 @@ buf_flush_list(
ulint i;
bool success = true;
/* JAN: TODO: */
if (is_pgcomp_wrk_init_done()) {
return(pgcomp_buf_flush_list(min_n, lsn_limit, n_processed));
}
/* JAN: TODO: END: */
if (buf_mtflu_init_done()) {
return(buf_mtflu_flush_list(min_n, lsn_limit, n_processed));
}
if (n_processed) {
*n_processed = 0;
@@ -2237,60 +2069,6 @@ buf_flush_single_page_from_LRU(
return(freed);
}
/* JAN: TODO: */
/*********************************************************************//**
pgcomp_Clears up tail of the LRU lists:
* Put replaceable pages at the tail of LRU to the free list
* Flush dirty pages at the tail of LRU to the disk
The depth to which we scan each buffer pool is controlled by dynamic
config parameter innodb_LRU_scan_depth.
@return total pages flushed */
UNIV_INTERN
ulint
pgcomp_buf_flush_LRU_tail(void)
/*====================*/
{
#ifdef UNIV_DEBUG
struct timeval p_start_time, p_end_time, d_time;
#endif
ulint total_flushed=0, i=0;
int cnt_flush[32];
#ifdef UNIV_DEBUG
gettimeofday(&p_start_time, 0x0);
#endif
ut_ad(is_pgcomp_wrk_init_done());
os_fast_mutex_lock(&pgcomp_mtx);
pgcomp_flush_work_items(srv_buf_pool_instances,
cnt_flush, BUF_FLUSH_LRU, srv_LRU_scan_depth, 0);
os_fast_mutex_unlock(&pgcomp_mtx);
for (i = 0; i < srv_buf_pool_instances; i++) {
if (cnt_flush[i]) {
total_flushed += cnt_flush[i];
MONITOR_INC_VALUE_CUMULATIVE(
MONITOR_LRU_BATCH_TOTAL_PAGE,
MONITOR_LRU_BATCH_COUNT,
MONITOR_LRU_BATCH_PAGES,
cnt_flush[i]);
}
}
#if UNIV_DEBUG
gettimeofday(&p_end_time, 0x0);
timediff(&p_end_time, &p_start_time, &d_time);
fprintf(stderr, "[1] [*n_processed: (min:%lu)%lu %llu usec]\n", (
srv_LRU_scan_depth * srv_buf_pool_instances), total_flushed,
(unsigned long long)(d_time.tv_usec+(d_time.tv_sec*1000000)));
#endif
return(total_flushed);
}
/* JAN: TODO: END: */
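The UNIV_DEBUG blocks in the removed code time each flush with a pair of gettimeofday() calls and the timediff() helper, then print the elapsed microseconds. A standalone sketch of that measurement (simplified; the real helper fills a struct timeval with the difference):

#include <cstdio>
#include <sys/time.h>

int main()
{
	struct timeval p_start_time, p_end_time;

	gettimeofday(&p_start_time, 0);
	/* ... the flush work being timed would run here ... */
	gettimeofday(&p_end_time, 0);

	/* Same quantity the removed fprintf() reports:
	   d_time.tv_usec + d_time.tv_sec * 1000000. */
	long long usec =
		(long long)(p_end_time.tv_sec - p_start_time.tv_sec) * 1000000LL
		+ (p_end_time.tv_usec - p_start_time.tv_usec);

	std::printf("[*n_processed: %lld usec]\n", usec);
	return 0;
}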
/*********************************************************************//**
Clears up tail of the LRU lists:
* Put replaceable pages at the tail of LRU to the free list
@@ -2304,12 +2082,11 @@ buf_flush_LRU_tail(void)
/*====================*/
{
ulint total_flushed = 0;
/* JAN: TODO: */
if(is_pgcomp_wrk_init_done())
{
return(pgcomp_buf_flush_LRU_tail());
}
/* JAN: TODO: END */
if(buf_mtflu_init_done())
{
return(buf_mtflu_flush_LRU_tail());
}
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
......
/*****************************************************************************
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -274,6 +275,54 @@ buf_flush_get_dirty_pages_count(
#endif /* !UNIV_HOTBACKUP */
/******************************************************************//**
Start a buffer flush batch for LRU or flush list */
ibool
buf_flush_start(
/*============*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
enum buf_flush flush_type); /*!< in: BUF_FLUSH_LRU
or BUF_FLUSH_LIST */
/******************************************************************//**
End a buffer flush batch for LRU or flush list */
void
buf_flush_end(
/*==========*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
enum buf_flush flush_type); /*!< in: BUF_FLUSH_LRU
or BUF_FLUSH_LIST */
/******************************************************************//**
Gather the aggregated stats for both flush list and LRU list flushing */
void
buf_flush_common(
/*=============*/
enum buf_flush flush_type, /*!< in: type of flush */
ulint page_count); /*!< in: number of pages flushed */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list or flush_list.
NOTE 1: in the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued */
ulint
buf_flush_batch(
/*============*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
enum buf_flush flush_type, /*!< in: BUF_FLUSH_LRU or
BUF_FLUSH_LIST; if BUF_FLUSH_LIST,
then the caller must not own any
latches on pages */
ulint min_n, /*!< in: wished minimum number of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
lsn_t lsn_limit); /*!< in: in the case of BUF_FLUSH_LIST
all blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
#ifndef UNIV_NONINL
#include "buf0flu.ic"
#endif
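buf_flush_start(), buf_flush_batch(), buf_flush_end() and buf_flush_common() lose their static linkage in this commit so the multi-threaded flusher can drive a batch from outside buf0flu.cc. A standalone sketch of the calling sequence those exports imply, with stub types and bodies rather than the real signatures:

#include <cstdio>

typedef unsigned long ulint;
enum buf_flush_t { BUF_FLUSH_LRU, BUF_FLUSH_LIST };

/* Stubs standing in for the newly exported buf0flu.cc functions. */
static bool  buf_flush_start(buf_flush_t type)              { (void)type; return true; }
static ulint buf_flush_batch(buf_flush_t type, ulint min_n) { (void)type; return min_n; }
static void  buf_flush_end(buf_flush_t type)                { (void)type; }
static void  buf_flush_common(buf_flush_t type, ulint n)    { (void)type; (void)n; }

/* The sequence a flush driver follows, as in the removed LRU wrapper:
   start -> batch -> end -> common, bailing out if a batch of the same
   type is already running. */
static bool flush_one_batch(ulint min_n, ulint* n_processed)
{
	if (n_processed) { *n_processed = 0; }

	if (!buf_flush_start(BUF_FLUSH_LRU)) {
		return false;    /* another batch of this type is running */
	}

	ulint page_count = buf_flush_batch(BUF_FLUSH_LRU, min_n);

	buf_flush_end(BUF_FLUSH_LRU);
	buf_flush_common(BUF_FLUSH_LRU, page_count);

	if (n_processed) { *n_processed = page_count; }
	return true;
}

int main()
{
	ulint n = 0;
	flush_one_batch(100, &n);
	std::printf("queued %lu LRU writes\n", n);
	return 0;
}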
......
/*****************************************************************************
Copyright (C) 2014 SkySQL Ab. All Rights Reserved.
Copyright (C) 2014 Fusion-io. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
/******************************************************************//**
@file include/buf0mtflu.h
Multi-threaded flush method interface function prototypes
Created 06/02/2014 Jan Lindström jan.lindstrom@skysql.com
Dhananjoy Das DDas@fusionio.com
***********************************************************************/
#ifndef buf0mtflu_h
#define buf0mtflu_h
/******************************************************************//**
Add exit work item to work queue to signal multi-threaded flush
threads that they should exit.
*/
void
buf_mtflu_io_thread_exit(void);
/*===========================*/
/******************************************************************//**
Initialize multi-threaded flush thread synchronization data.
@return Initialized multi-threaded flush thread synchronization data. */
void*
buf_mtflu_handler_init(
/*===================*/
ulint n_threads, /*!< in: Number of threads to create */
ulint wrk_cnt); /*!< in: Number of work items */
/******************************************************************//**
Return true if multi-threaded flush is initialized
@return true if initialized, false if not */
bool
buf_mtflu_init_done(void);
/*======================*/
/*********************************************************************//**
Clears up tail of the LRU lists:
* Put replaceable pages at the tail of LRU to the free list
* Flush dirty pages at the tail of LRU to the disk
The depth to which we scan each buffer pool is controlled by dynamic
config parameter innodb_LRU_scan_depth.
@return total pages flushed */
UNIV_INTERN
ulint
buf_mtflu_flush_LRU_tail(void);
/*===========================*/
/*******************************************************************//**
Multi-threaded version of buf_flush_list
*/
bool
buf_mtflu_flush_list(
/*=================*/
ulint min_n, /*!< in: wished minimum number of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all
blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
ulint* n_processed); /*!< out: the number of pages
which were processed is passed
back to caller. Ignored if NULL */
/*********************************************************************//**
Set correct thread identifiers to io thread array based on
information we have. */
void
buf_mtflu_set_thread_ids(
/*=====================*/
ulint n_threads, /*!<in: Number of threads to fill */
void* ctx, /*!<in: thread context */
os_thread_id_t* thread_ids); /*!<in: thread id array */
#endif
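Taken together, the prototypes above imply a simple lifecycle: initialize the flush threads at startup, consult buf_mtflu_init_done() before flushing, and post exit work items at shutdown. A standalone sketch with stub bodies (assumed behaviour, not the real buf0mtflu.cc):

#include <cstdio>

typedef unsigned long ulint;

/* Stubs standing in for the buf0mtflu.h interface above. */
static void* buf_mtflu_handler_init(ulint n_threads, ulint wrk_cnt)
{
	std::printf("starting %lu flush threads, %lu work items\n",
		    n_threads, wrk_cnt);
	return 0;    /* real code returns the shared synchronization data */
}

static bool  buf_mtflu_init_done(void)      { return true; }
static ulint buf_mtflu_flush_LRU_tail(void) { return 0; }
static void  buf_mtflu_io_thread_exit(void)
{
	std::printf("posting exit work items to flush threads\n");
}

int main()
{
	const ulint srv_mtflush_threads   = 8;   /* assumed config value */
	const ulint srv_buf_pool_instances = 8;  /* assumed config value */

	buf_mtflu_handler_init(srv_mtflush_threads, srv_buf_pool_instances);

	if (buf_mtflu_init_done()) {
		ulint flushed = buf_mtflu_flush_LRU_tail();
		std::printf("LRU tail flush: %lu pages\n", flushed);
	}

	buf_mtflu_io_thread_exit();
	return 0;
}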
@@ -259,7 +259,7 @@ extern my_bool srv_use_lz4;
/* Number of flush threads */
#define MTFLUSH_MAX_WORKER 64
extern ulint srv_mtflush_threads;
extern long srv_mtflush_threads;
#ifdef __WIN__
extern ibool srv_use_native_conditions;
......
@@ -37,7 +37,8 @@ Created 10/10/1995 Heikki Tuuri
#endif
/*********************************************************************//**
Normalizes a directory path for Windows: converts slashes to backslashes. */
Normalizes a directory path for Windows: converts slashes to backslashes.
*/
UNIV_INTERN
void
srv_normalize_path_for_win(
......
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2014, SkySQL Ab.
Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -162,6 +162,8 @@ UNIV_INTERN my_bool srv_use_posix_fallocate = FALSE;
UNIV_INTERN my_bool srv_use_atomic_writes = FALSE;
/* If this flag IS TRUE, then we use lz4 to compress/decompress pages */
UNIV_INTERN my_bool srv_use_lz4 = FALSE;
/* Number of threads used for multi-threaded flush */
UNIV_INTERN long srv_mtflush_threads = 0;
#ifdef __WIN__
/* Windows native condition variables. We use runtime loading / function
......
@@ -1862,6 +1862,9 @@ buf_flush_start(
/* There is already a flush batch of the same type running */
fprintf(stderr, "Error: flush_type %d n_flush %lu init_flush %lu\n",
flush_type, buf_pool->n_flush[flush_type], buf_pool->init_flush[flush_type]);
mutex_exit(&buf_pool->flush_state_mutex);
return(FALSE);
......