Commit 82785bdb authored by Vadim Tkachenko

sync with patches rev12

parent eccce6ba
......@@ -3595,15 +3595,13 @@ buf_print_io(
buf_pool_mutex_enter();
fprintf(file,
"Buffer pool size %lu\n"
"Buffer pool size, bytes %lu\n"
"Free buffers %lu\n"
"Database pages %lu\n"
"Modified db pages %lu\n"
"Buffer pool size %lu\n"
"Free buffers %lu\n"
"Database pages %lu\n"
"Modified db pages %lu\n"
"Pending reads %lu\n"
"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
(ulong) size,
(ulong) size * UNIV_PAGE_SIZE,
(ulong) UT_LIST_GET_LEN(buf_pool->free),
(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
(ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
......
......@@ -187,10 +187,6 @@ buf_read_ahead_random(
ulint i;
ulint buf_read_ahead_random_area;
if (!(srv_read_ahead & 1)) {
return(0);
}
if (srv_startup_is_before_trx_rollback_phase) {
/* No read-ahead to avoid thread deadlocks */
return(0);
......@@ -416,10 +412,6 @@ buf_read_ahead_linear(
const ulint buf_read_ahead_linear_area
= BUF_READ_AHEAD_LINEAR_AREA;
if (!(srv_read_ahead & 2)) {
return(0);
}
if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
/* No read-ahead to avoid thread deadlocks */
return(0);
......
......@@ -4816,30 +4816,3 @@ fil_page_get_type(
return(mach_read_from_2(page + FIL_PAGE_TYPE));
}
/*************************************************************************
Return local hash table information. */
ulint
fil_system_hash_cells(void)
/*=======================*/
{
if (fil_system) {
return (fil_system->spaces->n_cells
+ fil_system->name_hash->n_cells);
} else {
return 0;
}
}
ulint
fil_system_hash_nodes(void)
/*=======================*/
{
if (fil_system) {
return (UT_LIST_GET_LEN(fil_system->space_list)
* (sizeof(fil_space_t) + MEM_BLOCK_HEADER_SIZE));
} else {
return 0;
}
}
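These two helpers, removed by this commit, feed the "File system" line of the extended memory breakdown that is also removed from srv_printf_innodb_monitor() later in this diff. A minimal sketch of how the removed monitor code combined them (the wrapper function name is invented here for illustration; the formula is taken from the srv0srv.c hunk below):
/* Sketch only: constant factor = hash-cell arrays, variable factor =
per-space node estimate, as printed on the "File system" line. */
static ulint
fil_system_mem_estimate(void)
{
	ulint	constant = fil_system_hash_cells() * sizeof(hash_cell_t);
	ulint	variable = fil_system_hash_nodes();

	return(constant + variable);
}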
......@@ -137,7 +137,6 @@ static long innobase_mirrored_log_groups, innobase_log_files_in_group,
innobase_force_recovery, innobase_open_files,
innobase_autoinc_lock_mode;
static unsigned long innobase_read_io_threads, innobase_write_io_threads;
static long long innobase_buffer_pool_size, innobase_log_file_size;
/* The default values for the following char* start-up parameters
......@@ -2067,8 +2066,6 @@ innobase_init(
srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
srv_n_file_io_threads = (ulint) innobase_file_io_threads;
srv_n_read_io_threads = (ulint) innobase_read_io_threads;
srv_n_write_io_threads = (ulint) innobase_write_io_threads;
srv_force_recovery = (ulint) innobase_force_recovery;
......@@ -9409,16 +9406,6 @@ static MYSQL_SYSVAR_BOOL(locks_unsafe_for_binlog, innobase_locks_unsafe_for_binl
"Force InnoDB to not use next-key locking, to use only row-level locking.",
NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONG(show_verbose_locks, srv_show_verbose_locks,
PLUGIN_VAR_OPCMDARG,
"Whether to show records locked in SHOW INNODB STATUS.",
NULL, NULL, 0, 0, 1, 0);
static MYSQL_SYSVAR_ULONG(show_locks_held, srv_show_locks_held,
PLUGIN_VAR_RQCMDARG,
"Number of locks held to print for each InnoDB transaction in SHOW INNODB STATUS.",
NULL, NULL, 10, 0, 1000, 0);
#ifdef UNIV_LOG_ARCHIVE
static MYSQL_SYSVAR_STR(log_arch_dir, innobase_log_arch_dir,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
......@@ -9569,32 +9556,7 @@ static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode,
static MYSQL_SYSVAR_STR(version, innodb_version_str,
PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY,
"Percona-InnoDB-plugin version", NULL, NULL, INNODB_VERSION_STR);
static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
PLUGIN_VAR_RQCMDARG,
"Number of IO operations per second the server can do. Tunes background IO rate.",
NULL, NULL, 100, 100, 999999999, 0);
static MYSQL_SYSVAR_ULONG(read_ahead, srv_read_ahead,
PLUGIN_VAR_RQCMDARG,
"Enable/Diasable read aheads bit0:random bit1:linear",
NULL, NULL, 3, 0, 3, 0);
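The bit layout named in this description matches the checks removed from buf_read_ahead_random() and buf_read_ahead_linear() at the top of this diff (bit 0 gates random read-ahead, bit 1 gates linear). A minimal sketch of that gating; the macro and helper names are invented for illustration:
/* Sketch only: mirrors the removed `srv_read_ahead & 1` and
`srv_read_ahead & 2` tests in buf0rea.c. */
#define SRV_READ_AHEAD_RANDOM	1	/* bit 0 */
#define SRV_READ_AHEAD_LINEAR	2	/* bit 1 */

static ibool
srv_read_ahead_allowed(ulint which)	/* SRV_READ_AHEAD_RANDOM or _LINEAR */
{
	/* the default of 3 enables both paths; 0 disables read-ahead */
	return((srv_read_ahead & which) != 0);
}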
static MYSQL_SYSVAR_ULONG(adaptive_checkpoint, srv_adaptive_checkpoint,
PLUGIN_VAR_RQCMDARG,
"Enable/Disable flushing along modified age 0:disable 1:enable",
NULL, NULL, 0, 0, 1, 0);
static MYSQL_SYSVAR_ULONG(read_io_threads, innobase_read_io_threads,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Number of background read I/O threads in InnoDB.",
NULL, NULL, 1, 1, 64, 0);
static MYSQL_SYSVAR_ULONG(write_io_threads, innobase_write_io_threads,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Number of background write I/O threads in InnoDB.",
NULL, NULL, 1, 1, 64, 0);
"InnoDB version", NULL, NULL, INNODB_VERSION_STR);
static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(additional_mem_pool_size),
......@@ -9641,14 +9603,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(thread_concurrency),
MYSQL_SYSVAR(thread_sleep_delay),
MYSQL_SYSVAR(autoinc_lock_mode),
MYSQL_SYSVAR(show_verbose_locks),
MYSQL_SYSVAR(show_locks_held),
MYSQL_SYSVAR(version),
MYSQL_SYSVAR(io_capacity),
MYSQL_SYSVAR(read_ahead),
MYSQL_SYSVAR(adaptive_checkpoint),
MYSQL_SYSVAR(read_io_threads),
MYSQL_SYSVAR(write_io_threads),
NULL
};
......
......@@ -24,8 +24,5 @@ struct innodb_enhancement {
const char *link;
}innodb_enhancements[] = {
{"innodb_show_enhancements","I_S.PERCONA_INNODB_ENHANCEMENTS","","http://www.percona.com/docs/wiki/percona-innodb-plugin:innodb_show_enhancements"},
{"innodb_show_status","Improvements to SHOW INNODB STATUS","Memory information and lock info fixes","http://www.percona.com/docs/wiki/percona-innodb-plugin:innodb_show_status"},
{"innodb_io","Improvements to InnoDB IO","","http://www.percona.com/docs/wiki/percona-innodb-plugin:innodb_io"},
{"innodb_rw_lock","InnoDB RW-lock fixes","Useful for 8+ cores SMP systems","http://www.percona.com/docs/wiki/percona-innodb-plugin:innodb_rw_lock"},
{NULL, NULL, NULL, NULL}
{NULL, NULL, NULL, NULL, NULL, NULL}
};
......@@ -696,16 +696,6 @@ fil_page_get_type(
return value not defined */
const byte* page); /* in: file page */
/*************************************************************************
Return local hash table information. */
ulint
fil_system_hash_cells(void);
/*========================*/
ulint
fil_system_hash_nodes(void);
/*========================*/
typedef struct fil_space_struct fil_space_t;
......
......@@ -561,10 +561,8 @@ os_aio_init(
/*========*/
ulint n, /* in: maximum number of pending aio operations
allowed; n must be divisible by n_segments */
// ulint n_segments, /* in: combined number of segments in the four
// first aio arrays; must be >= 4 */
ulint n_read_threads, /* n_segments == 2 + n_read_threads + n_write_threads */
ulint n_write_threads, /**/
ulint n_segments, /* in: combined number of segments in the four
first aio arrays; must be >= 4 */
ulint n_slots_sync); /* in: number of slots in the sync aio array */
/***********************************************************************
Requests an asynchronous i/o operation. */
......
......@@ -90,9 +90,6 @@ extern ulint srv_log_file_size;
extern ulint srv_log_buffer_size;
extern ulong srv_flush_log_at_trx_commit;
extern ulint srv_show_locks_held;
extern ulint srv_show_verbose_locks;
/* The sort order table of the MySQL latin1_swedish_ci character set
collation */
extern const byte* srv_latin1_ordering;
......@@ -103,8 +100,6 @@ extern ulint srv_mem_pool_size;
extern ulint srv_lock_table_size;
extern ulint srv_n_file_io_threads;
extern ulint srv_n_read_io_threads;
extern ulint srv_n_write_io_threads;
#ifdef UNIV_LOG_ARCHIVE
extern ibool srv_log_archive_on;
......@@ -149,11 +144,6 @@ extern ulong srv_max_buf_pool_modified_pct;
extern ulong srv_max_purge_lag;
extern ulong srv_replication_delay;
extern ulint srv_io_capacity;
extern ulint srv_read_ahead;
extern ulint srv_adaptive_checkpoint;
/*-------------------------------------------*/
extern ulint srv_n_rows_inserted;
......
......@@ -328,17 +328,7 @@ rw_lock_get_x_lock_count(
Accessor functions for rw lock. */
UNIV_INLINE
ulint
rw_lock_get_s_waiters(
/*==================*/
rw_lock_t* lock);
UNIV_INLINE
ulint
rw_lock_get_x_waiters(
/*==================*/
rw_lock_t* lock);
UNIV_INLINE
ulint
rw_lock_get_wx_waiters(
rw_lock_get_waiters(
/*================*/
rw_lock_t* lock);
UNIV_INLINE
......@@ -422,11 +412,6 @@ rw_lock_debug_print(
rw_lock_debug_t* info); /* in: debug struct */
#endif /* UNIV_SYNC_DEBUG */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* This value means NOT_LOCKED */
#define RW_LOCK_BIAS 0x00100000
#endif
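For readers of this hunk: RW_LOCK_BIAS is the centre of the atomic-builtin locking scheme that the rest of this commit removes from sync0rw.ic and sync0rw.c. A minimal sketch of the convention, reconstructed from the removed code below (readers subtract 1 from lock_word, a writer subtracts the whole bias); the function names are invented, and this illustrates the removed scheme, not the code that replaces it:
/* Removed lock_word convention:
     lock_word == RW_LOCK_BIAS        -> not locked
     0 < lock_word < RW_LOCK_BIAS     -> held by readers only
     lock_word <= 0                   -> a writer holds it or waits for it */

static ibool
rw_lock_bias_try_s_lock(volatile lint* lock_word)
{
	if (__sync_sub_and_fetch(lock_word, 1) <= 0) {
		/* a writer got there first: undo and fail */
		__sync_fetch_and_add(lock_word, 1);
		return(FALSE);
	}
	return(TRUE);
}

static ibool
rw_lock_bias_try_x_lock(volatile lint* lock_word)
{
	if (__sync_sub_and_fetch(lock_word, RW_LOCK_BIAS) == 0) {
		return(TRUE);	/* no readers and no other writer */
	}
	/* readers (or another writer) present: undo and fail */
	__sync_fetch_and_add(lock_word, RW_LOCK_BIAS);
	return(FALSE);
}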
/* NOTE! The structure appears here only for the compiler to know its size.
Do not use its fields directly! The structure used in the spin lock
implementation of a read-write lock. Several threads may have a shared lock
......@@ -436,9 +421,9 @@ blocked by readers, a writer may queue for the lock by setting the writer
field. Then no new readers are allowed in. */
struct rw_lock_struct {
/* Used by sync0arr.c for thread queueing */
os_event_t s_event; /* Used for s_lock */
os_event_t x_event; /* Used for x_lock */
os_event_t event; /* Used by sync0arr.c for thread queueing */
#ifdef __WIN__
os_event_t wait_ex_event; /* This windows specific event is
used by the thread which has set the
lock state to RW_LOCK_WAIT_EX. The
......@@ -446,34 +431,30 @@ struct rw_lock_struct {
thread will be the next one to proceed
once the event gets
signalled. See LEMMA 2 in sync0sync.c */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
volatile lint lock_word; /* Used by using atomic builtin */
#endif
volatile ulint reader_count; /* Number of readers who have locked this
ulint reader_count; /* Number of readers who have locked this
lock in the shared mode */
volatile ulint writer; /* This field is set to RW_LOCK_EX if there
ulint writer; /* This field is set to RW_LOCK_EX if there
is a writer owning the lock (in exclusive
mode), RW_LOCK_WAIT_EX if a writer is
queueing for the lock, and
RW_LOCK_NOT_LOCKED, otherwise. */
volatile os_thread_id_t writer_thread;
os_thread_id_t writer_thread;
/* Thread id of a possible writer thread */
volatile ulint writer_count; /* Number of times the same thread has
ulint writer_count; /* Number of times the same thread has
recursively locked the lock in the exclusive
mode */
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_t mutex; /* The mutex protecting rw_lock_struct */
#endif
ulint pass; /* Default value 0. This is set to some
value != 0 given by the caller of an x-lock
operation, if the x-lock is to be passed to
another thread to unlock (which happens in
asynchronous i/o). */
volatile ulint s_waiters; /* 1: there are waiters (s_lock) */
volatile ulint x_waiters; /* 1: there are waiters (x_lock) */
volatile ulint wait_ex_waiters; /* 1: there are waiters (wait_ex) */
ulint waiters; /* This ulint is set to 1 if there are
waiters (readers or writers) in the global
wait array, waiting for this rw_lock.
Otherwise, == 0. */
UT_LIST_NODE_T(rw_lock_t) list;
/* All allocated rw locks are put into a
list */
......@@ -486,7 +467,7 @@ struct rw_lock_struct {
const char* cfile_name;/* File name where lock created */
const char* last_s_file_name;/* File name where last s-locked */
const char* last_x_file_name;/* File name where last x-locked */
volatile ibool writer_is_wait_ex;
ibool writer_is_wait_ex;
/* This is TRUE if the writer field is
RW_LOCK_WAIT_EX; this field is located far
from the memory update hotspot fields which
......
......@@ -47,52 +47,20 @@ rw_lock_remove_debug_info(
Accessor functions for rw lock. */
UNIV_INLINE
ulint
rw_lock_get_s_waiters(
rw_lock_get_waiters(
/*================*/
rw_lock_t* lock)
{
return(lock->s_waiters);
}
UNIV_INLINE
ulint
rw_lock_get_x_waiters(
/*================*/
rw_lock_t* lock)
{
return(lock->x_waiters);
}
UNIV_INLINE
ulint
rw_lock_get_wx_waiters(
/*================*/
rw_lock_t* lock)
{
return(lock->wait_ex_waiters);
return(lock->waiters);
}
UNIV_INLINE
void
rw_lock_set_s_waiters(
rw_lock_t* lock,
ulint flag)
{
lock->s_waiters = flag;
}
UNIV_INLINE
void
rw_lock_set_x_waiters(
rw_lock_set_waiters(
/*================*/
rw_lock_t* lock,
ulint flag)
{
lock->x_waiters = flag;
}
UNIV_INLINE
void
rw_lock_set_wx_waiters(
/*================*/
rw_lock_t* lock,
ulint flag)
{
lock->wait_ex_waiters = flag;
lock->waiters = flag;
}
UNIV_INLINE
ulint
......@@ -100,19 +68,7 @@ rw_lock_get_writer(
/*===============*/
rw_lock_t* lock)
{
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (lock->writer == RW_LOCK_NOT_LOCKED) {
return(RW_LOCK_NOT_LOCKED);
}
if (lock->writer_is_wait_ex) {
return(RW_LOCK_WAIT_EX);
} else {
return(RW_LOCK_EX);
}
#else
return(lock->writer);
#endif
}
UNIV_INLINE
void
......@@ -140,7 +96,6 @@ rw_lock_set_reader_count(
{
lock->reader_count = count;
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
UNIV_INLINE
mutex_t*
rw_lock_get_mutex(
......@@ -149,7 +104,6 @@ rw_lock_get_mutex(
{
return(&(lock->mutex));
}
#endif
/**********************************************************************
Returns the value of writer_count for the lock. Does not reserve the lock
......@@ -179,27 +133,13 @@ rw_lock_s_lock_low(
const char* file_name, /* in: file name where lock requested */
ulint line) /* in: line where requested */
{
#ifndef HAVE_GCC_ATOMIC_BUILTINS
ut_ad(mutex_own(rw_lock_get_mutex(lock)));
#endif
/* Check if the writer field is free */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (UNIV_LIKELY(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED)) {
/* try s-lock */
if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
/* fail */
__sync_fetch_and_add(&(lock->lock_word),1);
return(FALSE); /* locking did not succeed */
}
/* success */
__sync_fetch_and_add(&(lock->reader_count),1);
#else
if (UNIV_LIKELY(lock->writer == RW_LOCK_NOT_LOCKED)) {
/* Set the shared lock by incrementing the reader count */
lock->reader_count++;
#endif
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name,
......@@ -226,15 +166,11 @@ rw_lock_s_lock_direct(
const char* file_name, /* in: file name where requested */
ulint line) /* in: line where lock requested */
{
ut_ad(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
ut_ad(lock->writer == RW_LOCK_NOT_LOCKED);
ut_ad(rw_lock_get_reader_count(lock) == 0);
/* Set the shared lock by incrementing the reader count */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
__sync_fetch_and_add(&(lock->reader_count),1);
#else
lock->reader_count++;
#endif
lock->last_s_file_name = file_name;
lock->last_s_line = line;
......@@ -262,11 +198,7 @@ rw_lock_x_lock_direct(
rw_lock_set_writer(lock, RW_LOCK_EX);
lock->writer_thread = os_thread_get_curr_id();
#ifdef HAVE_GCC_ATOMIC_BUILTINS
__sync_fetch_and_add(&(lock->writer_count),1);
#else
lock->writer_count++;
#endif
lock->pass = 0;
lock->last_x_file_name = file_name;
......@@ -308,21 +240,15 @@ rw_lock_s_lock_func(
ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
#endif /* UNIV_SYNC_DEBUG */
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(rw_lock_get_mutex(lock));
#endif
if (UNIV_LIKELY(rw_lock_s_lock_low(lock, pass, file_name, line))) {
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
return; /* Success */
} else {
/* Did not succeed, try spin wait */
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
rw_lock_s_lock_spin(lock, pass, file_name, line);
......@@ -345,23 +271,11 @@ rw_lock_s_lock_func_nowait(
{
ibool success = FALSE;
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
/* try s-lock */
if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
/* fail */
__sync_fetch_and_add(&(lock->lock_word),1);
return(FALSE); /* locking did not succeed */
}
/* success */
__sync_fetch_and_add(&(lock->reader_count),1);
#else
mutex_enter(rw_lock_get_mutex(lock));
if (lock->writer == RW_LOCK_NOT_LOCKED) {
/* Set the shared lock by incrementing the reader count */
lock->reader_count++;
#endif
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, 0, RW_LOCK_SHARED, file_name,
......@@ -374,9 +288,7 @@ rw_lock_s_lock_func_nowait(
success = TRUE;
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
return(success);
}
......@@ -396,55 +308,6 @@ rw_lock_x_lock_func_nowait(
{
ibool success = FALSE;
os_thread_id_t curr_thread = os_thread_get_curr_id();
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if ((lock->lock_word == RW_LOCK_BIAS)
&& rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
/* try x-lock */
if(__sync_sub_and_fetch(&(lock->lock_word),
RW_LOCK_BIAS) == 0) {
/* success */
/* try to lock writer */
if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
== RW_LOCK_NOT_LOCKED) {
/* success */
lock->writer_thread = curr_thread;
lock->pass = 0;
lock->writer_is_wait_ex = FALSE;
/* the next function may act as a memory barrier */
relock:
__sync_fetch_and_add(&(lock->writer_count),1);
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
#endif
lock->last_x_file_name = file_name;
lock->last_x_line = line;
ut_ad(rw_lock_validate(lock));
return(TRUE);
} else {
/* x-unlock */
__sync_fetch_and_add(&(lock->lock_word),
RW_LOCK_BIAS);
}
} else {
/* fail (x-lock) */
__sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS);
}
}
if (lock->pass == 0
&& os_thread_eq(lock->writer_thread, curr_thread)
&& rw_lock_get_writer(lock) == RW_LOCK_EX) {
goto relock;
}
ut_ad(rw_lock_validate(lock));
return(FALSE);
#else
mutex_enter(rw_lock_get_mutex(lock));
if (UNIV_UNLIKELY(rw_lock_get_reader_count(lock) != 0)) {
......@@ -475,7 +338,6 @@ relock:
ut_ad(rw_lock_validate(lock));
return(success);
#endif
}
/**********************************************************************
......@@ -491,33 +353,16 @@ rw_lock_s_unlock_func(
#endif
)
{
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_t* mutex = &(lock->mutex);
#endif
ibool x_sg = FALSE;
ibool wx_sg = FALSE;
#ifdef HAVE_GCC_ATOMIC_BUILTINS
ibool last = FALSE;
#endif
ibool sg = FALSE;
#ifndef HAVE_GCC_ATOMIC_BUILTINS
/* Acquire the mutex protecting the rw-lock fields */
mutex_enter(mutex);
#endif
/* Reset the shared lock by decrementing the reader count */
ut_a(lock->reader_count > 0);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* unlock lock_word */
__sync_fetch_and_add(&(lock->lock_word),1);
if(__sync_sub_and_fetch(&(lock->reader_count),1) == 0) {
last = TRUE;
}
#else
lock->reader_count--;
#endif
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
......@@ -526,36 +371,20 @@ rw_lock_s_unlock_func(
/* If there may be waiters and this was the last s-lock,
signal the object */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (UNIV_UNLIKELY(last && lock->wait_ex_waiters)) {
#else
if (UNIV_UNLIKELY(lock->wait_ex_waiters)
if (UNIV_UNLIKELY(lock->waiters)
&& lock->reader_count == 0) {
#endif
wx_sg = TRUE;
sg = TRUE;
rw_lock_set_wx_waiters(lock, 0);
rw_lock_set_waiters(lock, 0);
}
#ifdef HAVE_GCC_ATOMIC_BUILTINS
else if (UNIV_UNLIKELY(last && lock->x_waiters)) {
#else
else if (UNIV_UNLIKELY(lock->x_waiters)
&& lock->reader_count == 0) {
#endif
x_sg = TRUE;
rw_lock_set_x_waiters(lock, 0);
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(mutex);
#endif
if (UNIV_UNLIKELY(wx_sg)) {
if (UNIV_UNLIKELY(sg)) {
#ifdef __WIN__
os_event_set(lock->wait_ex_event);
sync_array_object_signalled(sync_primary_wait_array);
} else if (UNIV_UNLIKELY(x_sg)) {
os_event_set(lock->x_event);
#endif
os_event_set(lock->event);
sync_array_object_signalled(sync_primary_wait_array);
}
......@@ -579,19 +408,13 @@ rw_lock_s_unlock_direct(
ut_ad(lock->reader_count > 0);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
__sync_sub_and_fetch(&(lock->reader_count),1);
#else
lock->reader_count--;
#endif
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, 0, RW_LOCK_SHARED);
#endif
ut_ad(!lock->s_waiters);
ut_ad(!lock->x_waiters);
ut_ad(!lock->wait_ex_waiters);
ut_ad(!lock->waiters);
ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_PERF_STAT
rw_s_exit_count++;
......@@ -611,81 +434,41 @@ rw_lock_x_unlock_func(
#endif
)
{
#ifdef HAVE_GCC_ATOMIC_BUILTINS
ibool last = FALSE;
#endif
ibool s_sg = FALSE;
ibool x_sg = FALSE;
ibool sg = FALSE;
#ifndef HAVE_GCC_ATOMIC_BUILTINS
/* Acquire the mutex protecting the rw-lock fields */
mutex_enter(&(lock->mutex));
#endif
/* Reset the exclusive lock if this thread no longer has an x-mode
lock */
ut_ad(lock->writer_count > 0);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
last = TRUE;
}
if (last) {
/* unlock lock_word */
__sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS);
/* FIXME: -1 is not a portable value for a pthread id,
but we should not keep the id of a thread that no longer owns the lock. */
lock->writer_thread = -1;
/* an atomic operation may be safer with respect to memory ordering. */
rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
__sync_synchronize();
}
#else
lock->writer_count--;
if (lock->writer_count == 0) {
rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
}
#endif
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
#endif
/* If there may be waiters, signal the lock */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (last) {
#else
if (lock->writer_count == 0) {
#endif
if(lock->s_waiters){
s_sg = TRUE;
rw_lock_set_s_waiters(lock, 0);
}
if(lock->x_waiters){
x_sg = TRUE;
rw_lock_set_x_waiters(lock, 0);
}
if (UNIV_UNLIKELY(lock->waiters)
&& lock->writer_count == 0) {
sg = TRUE;
rw_lock_set_waiters(lock, 0);
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#endif
if (UNIV_UNLIKELY(s_sg)) {
os_event_set(lock->s_event);
sync_array_object_signalled(sync_primary_wait_array);
}
if (UNIV_UNLIKELY(x_sg)) {
if (UNIV_UNLIKELY(sg)) {
#ifdef __WIN__
/* This may not be strictly necessary. */
os_event_set(lock->wait_ex_event);
#endif
os_event_set(lock->x_event);
os_event_set(lock->event);
sync_array_object_signalled(sync_primary_wait_array);
}
......@@ -710,13 +493,9 @@ rw_lock_x_unlock_direct(
ut_ad(lock->writer_count > 0);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
#else
lock->writer_count--;
if (lock->writer_count == 0) {
#endif
rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
}
......@@ -724,9 +503,7 @@ rw_lock_x_unlock_direct(
rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
#endif
ut_ad(!lock->s_waiters);
ut_ad(!lock->x_waiters);
ut_ad(!lock->wait_ex_waiters);
ut_ad(!lock->waiters);
ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_PERF_STAT
......
......@@ -60,17 +60,6 @@ thr_local_get_in_ibuf_field(void);
/*=============================*/
/* out: pointer to the in_ibuf field */
/*************************************************************************
Return local hash table information. */
ulint
thr_local_hash_cells(void);
/*=======================*/
ulint
thr_local_hash_nodes(void);
/*=======================*/
#ifndef UNIV_NONINL
#include "thr0loc.ic"
#endif
......
......@@ -12,7 +12,6 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 2
#define PERCONA_INNODB_VERSION 1
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
......@@ -24,14 +23,13 @@ component, i.e. we show M.N.P as M.N */
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
/* auxiliary macros to help creating the version as string */
#define __INNODB_VERSION(a, b, c, d) (#a "." #b "." #c "-" #d)
#define _INNODB_VERSION(a, b, c, d) __INNODB_VERSION(a, b, c, d)
#define __INNODB_VERSION(a, b, c) (#a "." #b "." #c)
#define _INNODB_VERSION(a, b, c) __INNODB_VERSION(a, b, c)
#define INNODB_VERSION_STR \
_INNODB_VERSION(INNODB_VERSION_MAJOR, \
INNODB_VERSION_MINOR, \
INNODB_VERSION_BUGFIX, \
PERCONA_INNODB_VERSION)
INNODB_VERSION_BUGFIX)
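A worked expansion with the values defined above makes the effect of this hunk concrete (INNODB_VERSION_MAJOR/MINOR/BUGFIX are 1/0/2 and PERCONA_INNODB_VERSION was 1):
/* removed form:  _INNODB_VERSION(1, 0, 2, 1) -> "1" "." "0" "." "2" "-" "1" -> "1.0.2-1"
   restored form: _INNODB_VERSION(1, 0, 2)    -> "1" "." "0" "." "2"         -> "1.0.2"  */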
#ifdef MYSQL_DYNAMIC_PLUGIN
/* In the dynamic plugin, redefine some externally visible symbols
......
......@@ -4294,32 +4294,32 @@ lock_rec_print(
putc('\n', file);
if ( srv_show_verbose_locks ) {
block = buf_page_try_get(space, page_no, &mtr);
if (block) {
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
if (lock_rec_get_nth_bit(lock, i)) {
const rec_t* rec
= page_find_rec_with_heap_no(
buf_block_get_frame(block), i);
offsets = rec_get_offsets(
rec, lock->index, offsets,
ULINT_UNDEFINED, &heap);
fprintf(file, "Record lock, heap no %lu ",
(ulong) i);
rec_print_new(file, rec, offsets);
putc('\n', file);
}
}
} else {
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
fprintf(file, "Record lock, heap no %lu\n", (ulong) i);
block = buf_page_try_get(space, page_no, &mtr);
if (block) {
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
if (lock_rec_get_nth_bit(lock, i)) {
const rec_t* rec
= page_find_rec_with_heap_no(
buf_block_get_frame(block), i);
offsets = rec_get_offsets(
rec, lock->index, offsets,
ULINT_UNDEFINED, &heap);
fprintf(file, "Record lock, heap no %lu ",
(ulong) i);
rec_print_new(file, rec, offsets);
putc('\n', file);
}
}
} else {
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
fprintf(file, "Record lock, heap no %lu\n", (ulong) i);
}
}
mtr_commit(&mtr);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
......@@ -4498,7 +4498,7 @@ loop:
}
}
if (!srv_print_innodb_lock_monitor && !srv_show_locks_held) {
if (!srv_print_innodb_lock_monitor) {
nth_trx++;
goto loop;
}
......@@ -4557,8 +4557,8 @@ loop:
nth_lock++;
if (nth_lock >= srv_show_locks_held) {
fputs("TOO LOCKS PRINTED FOR THIS TRX:"
if (nth_lock >= 10) {
fputs("10 LOCKS PRINTED FOR THIS TRX:"
" SUPPRESSING FURTHER PRINTS\n",
file);
......
......@@ -3258,15 +3258,6 @@ log_print(
log_sys->flushed_to_disk_lsn,
log_sys->last_checkpoint_lsn);
fprintf(file,
"Max checkpoint age %lu\n"
"Modified age %lu\n"
"Checkpoint age %lu\n",
(ulong) log_sys->max_checkpoint_age,
(ulong) (log_sys->lsn -
log_buf_pool_get_oldest_modification()),
(ulong) (log_sys->lsn - log_sys->last_checkpoint_lsn));
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
......
......@@ -2920,10 +2920,8 @@ os_aio_init(
/*========*/
ulint n, /* in: maximum number of pending aio operations
allowed; n must be divisible by n_segments */
// ulint n_segments, /* in: combined number of segments in the four
// first aio arrays; must be >= 4 */
ulint n_read_threads, /* n_segments == 2 + n_read_threads + n_write_threads*/
ulint n_write_threads, /**/
ulint n_segments, /* in: combined number of segments in the four
first aio arrays; must be >= 4 */
ulint n_slots_sync) /* in: number of slots in the sync aio array */
{
ulint n_read_segs;
......@@ -2931,8 +2929,6 @@ os_aio_init(
ulint n_per_seg;
ulint i;
ulint n_segments = 2 + n_read_threads + n_write_threads;
ut_ad(n % n_segments == 0);
ut_ad(n_segments >= 4);
......@@ -2943,8 +2939,8 @@ os_aio_init(
}
n_per_seg = n / n_segments;
n_write_segs = n_write_threads;
n_read_segs = n_read_threads;
n_write_segs = (n_segments - 2) / 2;
n_read_segs = n_segments - 2 - n_write_segs;
/* fprintf(stderr, "Array n per seg %lu\n", n_per_seg); */
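Worked numbers for the two segment layouts in this hunk, using hypothetical thread settings (the removed Percona form takes the counts directly from innodb_read_io_threads/innodb_write_io_threads; the restored stock form splits n_segments - 2 evenly):
/* Removed form, n_segments == 2 + n_read_threads + n_write_threads:
     n_read_threads = 4, n_write_threads = 4 (hypothetical)
     -> n_segments = 10, n_read_segs = 4, n_write_segs = 4
   Restored stock form with the usual srv_n_file_io_threads = 4:
     -> n_segments = 4, n_write_segs = (4 - 2) / 2 = 1, n_read_segs = 1 */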
......@@ -3144,13 +3140,6 @@ os_aio_array_reserve_slot(
OVERLAPPED* control;
#endif
ulint i;
ulint prim_segment;
ulint n;
n = array->n_slots / array->n_segments;
/* stripe in 64-block units (aligned with max(BUF_READ_AHEAD_AREA)) */
prim_segment = ( offset >> (UNIV_PAGE_SIZE_SHIFT + 6) ) % (array->n_segments);
loop:
os_mutex_enter(array->mutex);
......@@ -3169,16 +3158,6 @@ loop:
goto loop;
}
for (i = prim_segment * n; i < array->n_slots; i++) {
slot = os_aio_array_get_nth_slot(array, i);
if (slot->reserved == FALSE) {
break;
}
}
if (slot->reserved == TRUE){
/* No free slot found at or after the intended segment; search from the beginning. */
for (i = 0;; i++) {
slot = os_aio_array_get_nth_slot(array, i);
......@@ -3186,7 +3165,6 @@ loop:
break;
}
}
}
array->n_reserved++;
......
......@@ -7,3 +7,4 @@ INSTALL PLUGIN innodb_cmp SONAME 'ha_innodb.so';
INSTALL PLUGIN innodb_cmp_reset SONAME 'ha_innodb.so';
INSTALL PLUGIN innodb_cmpmem SONAME 'ha_innodb.so';
INSTALL PLUGIN innodb_cmpmem_reset SONAME 'ha_innodb.so';
INSTALL PLUGIN PERCONA_INNODB_ENHANCEMENTS SONAME 'ha_innodb.so';
......@@ -132,10 +132,6 @@ UNIV_INTERN ulint srv_log_file_size = ULINT_MAX;
UNIV_INTERN ulint srv_log_buffer_size = ULINT_MAX;
UNIV_INTERN ulong srv_flush_log_at_trx_commit = 1;
UNIV_INTERN ulint srv_show_locks_held = 10;
UNIV_INTERN ulint srv_show_verbose_locks = 0;
/* The sort order table of the MySQL latin1_swedish_ci character set
collation */
UNIV_INTERN const byte* srv_latin1_ordering;
......@@ -151,8 +147,6 @@ UNIV_INTERN ulint srv_mem_pool_size = ULINT_MAX;
UNIV_INTERN ulint srv_lock_table_size = ULINT_MAX;
UNIV_INTERN ulint srv_n_file_io_threads = ULINT_MAX;
ulint srv_n_read_io_threads = 1;
ulint srv_n_write_io_threads = 1;
#ifdef UNIV_LOG_ARCHIVE
UNIV_INTERN ibool srv_log_archive_on = FALSE;
......@@ -317,15 +311,6 @@ UNIV_INTERN int srv_query_thread_priority = 0;
UNIV_INTERN ulong srv_replication_delay = 0;
ulint srv_io_capacity = 100;
/* Returns the number of IO operations that is X percent of the capacity.
PCT_IO(5) -> returns the number of IO operations that is 5% of the max
where max is srv_io_capacity. */
#define PCT_IO(pct) ((ulint) (srv_io_capacity * ((double) pct / 100.0)))
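A worked example of the removed macro with a hypothetical capacity setting shows how the buf_flush_batch()/ibuf_contract_for_n_pages() call sites below scale, and why the restored stock literals are just the srv_io_capacity == 100 special case:
/* srv_io_capacity = 400 (hypothetical):
     PCT_IO(100) = (ulint)(400 * 100 / 100.0) = 400
     PCT_IO(10)  = (ulint)(400 *  10 / 100.0) =  40
     PCT_IO(5)   = (ulint)(400 *   5 / 100.0) =  20
   srv_io_capacity = 100 (default):
     PCT_IO(100) = 100, PCT_IO(10) = 10, PCT_IO(5) = 5,
   i.e. exactly the literals used by the restored stock calls. */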
ulint srv_read_ahead = 3; /* 1: random 2: linear 3: Both */
ulint srv_adaptive_checkpoint = 0; /* 0:disable 1:enable */
/*-------------------------------------------*/
UNIV_INTERN ulong srv_n_spin_wait_rounds = 20;
UNIV_INTERN ulong srv_n_free_tickets_to_enter = 500;
......@@ -1629,14 +1614,6 @@ srv_printf_innodb_monitor(
time_t current_time;
ulint n_reserved;
ulint btr_search_sys_subtotal;
ulint lock_sys_subtotal;
ulint recv_sys_subtotal;
ulint io_counter_subtotal;
ulint i;
trx_t* trx;
mutex_enter(&srv_innodb_monitor_mutex);
current_time = time(NULL);
......@@ -1680,6 +1657,24 @@ srv_printf_innodb_monitor(
mutex_exit(&dict_foreign_err_mutex);
lock_print_info_summary(file);
if (trx_start) {
long t = ftell(file);
if (t < 0) {
*trx_start = ULINT_UNDEFINED;
} else {
*trx_start = (ulint) t;
}
}
lock_print_info_all_transactions(file);
if (trx_end) {
long t = ftell(file);
if (t < 0) {
*trx_end = ULINT_UNDEFINED;
} else {
*trx_end = (ulint) t;
}
}
fputs("--------\n"
"FILE I/O\n"
"--------\n", file);
......@@ -1710,84 +1705,10 @@ srv_printf_innodb_monitor(
"BUFFER POOL AND MEMORY\n"
"----------------------\n", file);
fprintf(file,
"Total memory allocated " ULINTPF
"; in additional pool allocated " ULINTPF "\n",
ut_total_allocated_memory,
mem_pool_get_reserved(mem_comm_pool));
/* Calculate reserved memory */
if (btr_search_sys && btr_search_sys->hash_index->heap) {
btr_search_sys_subtotal = mem_heap_get_size(btr_search_sys->hash_index->heap);
} else {
btr_search_sys_subtotal = 0;
for (i=0; i < btr_search_sys->hash_index->n_mutexes; i++) {
btr_search_sys_subtotal += mem_heap_get_size(btr_search_sys->hash_index->heaps[i]);
}
}
lock_sys_subtotal = 0;
if (trx_sys) {
mutex_enter(&kernel_mutex);
trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
while (trx) {
lock_sys_subtotal += ((trx->lock_heap) ? mem_heap_get_size(trx->lock_heap) : 0);
trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
}
mutex_exit(&kernel_mutex);
}
recv_sys_subtotal = ((recv_sys && recv_sys->addr_hash)
? mem_heap_get_size(recv_sys->heap) : 0);
fprintf(file,
"Internal hash tables (constant factor + variable factor)\n"
" Adaptive hash index %lu \t(%lu + %lu)\n"
" Page hash %lu\n"
" Dictionary cache %lu \t(%lu + %lu)\n"
" File system %lu \t(%lu + %lu)\n"
" Lock system %lu \t(%lu + %lu)\n"
" Recovery system %lu \t(%lu + %lu)\n"
" Threads %lu \t(%lu + %lu)\n",
(ulong) (btr_search_sys
? (btr_search_sys->hash_index->n_cells * sizeof(hash_cell_t)) : 0)
+ btr_search_sys_subtotal,
(ulong) (btr_search_sys
? (btr_search_sys->hash_index->n_cells * sizeof(hash_cell_t)) : 0),
(ulong) btr_search_sys_subtotal,
(ulong) (buf_pool->page_hash->n_cells * sizeof(hash_cell_t)),
(ulong) (dict_sys ? ((dict_sys->table_hash->n_cells
+ dict_sys->table_id_hash->n_cells
) * sizeof(hash_cell_t)
+ dict_sys->size) : 0),
(ulong) (dict_sys ? ((dict_sys->table_hash->n_cells
+ dict_sys->table_id_hash->n_cells
) * sizeof(hash_cell_t)) : 0),
(ulong) (dict_sys ? (dict_sys->size) : 0),
(ulong) (fil_system_hash_cells() * sizeof(hash_cell_t)
+ fil_system_hash_nodes()),
(ulong) (fil_system_hash_cells() * sizeof(hash_cell_t)),
(ulong) fil_system_hash_nodes(),
(ulong) ((lock_sys ? (lock_sys->rec_hash->n_cells * sizeof(hash_cell_t)) : 0)
+ lock_sys_subtotal),
(ulong) (lock_sys ? (lock_sys->rec_hash->n_cells * sizeof(hash_cell_t)) : 0),
(ulong) lock_sys_subtotal,
(ulong) (((recv_sys && recv_sys->addr_hash)
? (recv_sys->addr_hash->n_cells * sizeof(hash_cell_t)) : 0)
+ recv_sys_subtotal),
(ulong) ((recv_sys && recv_sys->addr_hash)
? (recv_sys->addr_hash->n_cells * sizeof(hash_cell_t)) : 0),
(ulong) recv_sys_subtotal,
(ulong) (thr_local_hash_cells() * sizeof(hash_cell_t)
+ thr_local_hash_nodes()),
(ulong) (thr_local_hash_cells() * sizeof(hash_cell_t)),
(ulong) thr_local_hash_nodes());
"Total memory allocated " ULINTPF
"; in additional pool allocated " ULINTPF "\n",
ut_total_allocated_memory,
mem_pool_get_reserved(mem_comm_pool));
fprintf(file, "Dictionary memory allocated " ULINTPF "\n",
dict_sys->size);
......@@ -1846,25 +1767,6 @@ srv_printf_innodb_monitor(
srv_n_rows_deleted_old = srv_n_rows_deleted;
srv_n_rows_read_old = srv_n_rows_read;
lock_print_info_summary(file);
if (trx_start) {
long t = ftell(file);
if (t < 0) {
*trx_start = ULINT_UNDEFINED;
} else {
*trx_start = (ulint) t;
}
}
lock_print_info_all_transactions(file);
if (trx_end) {
long t = ftell(file);
if (t < 0) {
*trx_end = ULINT_UNDEFINED;
} else {
*trx_end = (ulint) t;
}
}
fputs("----------------------------\n"
"END OF INNODB MONITOR OUTPUT\n"
"============================\n", file);
......@@ -2301,8 +2203,6 @@ srv_master_thread(
ibool skip_sleep = FALSE;
ulint i;
ib_uint64_t oldest_lsn;
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Master thread starts, id %lu\n",
os_thread_pf(os_thread_get_curr_id()));
......@@ -2390,10 +2290,10 @@ loop:
+ log_sys->n_pending_writes;
n_ios = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
if (n_pend_ios < 3 && (n_ios - n_ios_old < PCT_IO(5))) {
if (n_pend_ios < 3 && (n_ios - n_ios_old < 5)) {
srv_main_thread_op_info = "doing insert buffer merge";
ibuf_contract_for_n_pages(
TRUE, PCT_IO((srv_insert_buffer_batch_size / 4)));
TRUE, srv_insert_buffer_batch_size / 4);
srv_main_thread_op_info = "flushing log";
......@@ -2406,7 +2306,7 @@ loop:
/* Try to keep the number of modified pages in the
buffer pool under the limit wished by the user */
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
IB_ULONGLONG_MAX);
/* If we had to do the flush, it may have taken
......@@ -2415,44 +2315,6 @@ loop:
iteration of this loop. */
skip_sleep = TRUE;
} else if (srv_adaptive_checkpoint) {
/* Try to keep the modified age from exceeding the
max_checkpoint_age * 7/8 line */
mutex_enter(&(log_sys->mutex));
oldest_lsn = buf_pool_get_oldest_modification();
if (oldest_lsn == 0) {
mutex_exit(&(log_sys->mutex));
} else {
if ((log_sys->lsn - oldest_lsn)
> (log_sys->max_checkpoint_age) - ((log_sys->max_checkpoint_age) / 4)) {
/* 2nd defence line (max_checkpoint_age * 3/4) */
mutex_exit(&(log_sys->mutex));
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
IB_ULONGLONG_MAX);
skip_sleep = TRUE;
} else if ((log_sys->lsn - oldest_lsn)
> (log_sys->max_checkpoint_age)/2 ) {
/* 1st defence line (max_checkpoint_age * 1/2) */
mutex_exit(&(log_sys->mutex));
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(10),
IB_ULONGLONG_MAX);
skip_sleep = TRUE;
} else {
mutex_exit(&(log_sys->mutex));
}
}
}
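Worked thresholds for the two defence lines removed above, with a hypothetical checkpoint limit:
/* max_checkpoint_age = 80,000,000 log bytes (hypothetical)
   modified age = log_sys->lsn - oldest modification
     age > 3/4 * limit = 60,000,000 -> buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100), ...)
     age > 1/2 * limit = 40,000,000 -> buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(10), ...)
     otherwise                      -> no adaptive flush this iteration */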
if (srv_activity_count == old_activity_count) {
......@@ -2479,10 +2341,10 @@ loop:
n_pend_ios = buf_get_n_pending_ios() + log_sys->n_pending_writes;
n_ios = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
if (n_pend_ios < 3 && (n_ios - n_ios_very_old < PCT_IO(200))) {
srv_main_thread_op_info = "flushing buffer pool pages";
buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100), IB_ULONGLONG_MAX);
if (n_pend_ios < 3 && (n_ios - n_ios_very_old < 200)) {
srv_main_thread_op_info = "flushing buffer pool pages";
buf_flush_batch(BUF_FLUSH_LIST, 100, IB_ULONGLONG_MAX);
srv_main_thread_op_info = "flushing log";
log_buffer_flush_to_disk();
......@@ -2492,7 +2354,7 @@ loop:
even if the server were active */
srv_main_thread_op_info = "doing insert buffer merge";
ibuf_contract_for_n_pages(TRUE, PCT_IO((srv_insert_buffer_batch_size / 4)));
ibuf_contract_for_n_pages(TRUE, srv_insert_buffer_batch_size / 4);
srv_main_thread_op_info = "flushing log";
log_buffer_flush_to_disk();
......@@ -2532,14 +2394,14 @@ loop:
(> 70 %), we assume we can afford reserving the disk(s) for
the time it requires to flush 100 pages */
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
IB_ULONGLONG_MAX);
} else {
/* Otherwise, we only flush a small number of pages so that
we do not unnecessarily use much disk i/o capacity from
other work */
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(10),
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10,
IB_ULONGLONG_MAX);
}
......@@ -2627,7 +2489,7 @@ background_loop:
n_bytes_merged = 0;
} else {
n_bytes_merged = ibuf_contract_for_n_pages(
TRUE, PCT_IO((srv_insert_buffer_batch_size * 5)));
TRUE, srv_insert_buffer_batch_size);
}
srv_main_thread_op_info = "reserving kernel mutex";
......@@ -2643,7 +2505,7 @@ flush_loop:
srv_main_thread_op_info = "flushing buffer pool pages";
if (srv_fast_shutdown < 2) {
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
IB_ULONGLONG_MAX);
} else {
/* In the fastest shutdown we do not flush the buffer pool
......
......@@ -1204,28 +1204,24 @@ innobase_start_or_create_for_mysql(void)
return(DB_ERROR);
}
/* overwrite innodb_file_io_threads */
srv_n_file_io_threads = 2 + srv_n_read_io_threads + srv_n_write_io_threads;
/* Restrict the maximum number of file i/o threads */
if (srv_n_file_io_threads > SRV_MAX_N_IO_THREADS) {
srv_n_file_io_threads = SRV_MAX_N_IO_THREADS;
srv_n_read_io_threads = srv_n_write_io_threads = (SRV_MAX_N_IO_THREADS - 2) / 2;
}
if (!os_aio_use_native_aio) {
/* In simulated aio we currently have use only for 4 threads */
/*srv_n_file_io_threads = 4;*/
srv_n_file_io_threads = 4;
os_aio_init(8 * SRV_N_PENDING_IOS_PER_THREAD
* srv_n_file_io_threads,
srv_n_read_io_threads, srv_n_write_io_threads,
SRV_MAX_N_PENDING_SYNC_IOS * 8);
srv_n_file_io_threads,
SRV_MAX_N_PENDING_SYNC_IOS);
} else {
os_aio_init(SRV_N_PENDING_IOS_PER_THREAD
* srv_n_file_io_threads,
srv_n_read_io_threads, srv_n_write_io_threads,
srv_n_file_io_threads,
SRV_MAX_N_PENDING_SYNC_IOS);
}
......
......@@ -307,13 +307,13 @@ sync_cell_event_reset(
{
if (type == SYNC_MUTEX) {
return(os_event_reset(((mutex_t *) object)->event));
#ifdef __WIN__
} else if (type == RW_LOCK_WAIT_EX) {
return(os_event_reset(
((rw_lock_t *) object)->wait_ex_event));
} else if (type == RW_LOCK_SHARED) {
return(os_event_reset(((rw_lock_t *) object)->s_event));
} else { /* RW_LOCK_EX */
return(os_event_reset(((rw_lock_t *) object)->x_event));
#endif
} else {
return(os_event_reset(((rw_lock_t *) object)->event));
}
}
......@@ -413,12 +413,15 @@ sync_array_wait_event(
if (cell->request_type == SYNC_MUTEX) {
event = ((mutex_t*) cell->wait_object)->event;
#ifdef __WIN__
/* On windows if the thread about to wait is the one which
has set the state of the rw_lock to RW_LOCK_WAIT_EX, then
it waits on a special event i.e.: wait_ex_event. */
} else if (cell->request_type == RW_LOCK_WAIT_EX) {
event = ((rw_lock_t*) cell->wait_object)->wait_ex_event;
} else if (cell->request_type == RW_LOCK_SHARED) {
event = ((rw_lock_t*) cell->wait_object)->s_event;
#endif
} else {
event = ((rw_lock_t*) cell->wait_object)->x_event;
event = ((rw_lock_t*) cell->wait_object)->event;
}
cell->waiting = TRUE;
......@@ -459,7 +462,6 @@ sync_array_cell_print(
mutex_t* mutex;
rw_lock_t* rwlock;
ulint type;
ulint writer;
type = cell->request_type;
......@@ -489,10 +491,12 @@ sync_array_cell_print(
(ulong) mutex->waiters);
} else if (type == RW_LOCK_EX
#ifdef __WIN__
|| type == RW_LOCK_WAIT_EX
#endif
|| type == RW_LOCK_SHARED) {
fputs(type == RW_LOCK_SHARED ? "S-lock on" : "X-lock on", file);
fputs(type == RW_LOCK_EX ? "X-lock on" : "S-lock on", file);
rwlock = cell->old_wait_rw_lock;
......@@ -500,24 +504,22 @@ sync_array_cell_print(
" RW-latch at %p created in file %s line %lu\n",
(void*) rwlock, rwlock->cfile_name,
(ulong) rwlock->cline);
writer = rw_lock_get_writer(rwlock);
if (writer != RW_LOCK_NOT_LOCKED) {
if (rwlock->writer != RW_LOCK_NOT_LOCKED) {
fprintf(file,
"a writer (thread id %lu) has"
" reserved it in mode %s",
(ulong) os_thread_pf(rwlock->writer_thread),
writer == RW_LOCK_EX
rwlock->writer == RW_LOCK_EX
? " exclusive\n"
: " wait exclusive\n");
}
fprintf(file,
"number of readers %lu, s_waiters flag %lu, x_waiters flag %lu\n"
"number of readers %lu, waiters flag %lu\n"
"Last time read locked in file %s line %lu\n"
"Last time write locked in file %s line %lu\n",
(ulong) rwlock->reader_count,
(ulong) rwlock->s_waiters,
(ulong) (rwlock->x_waiters || rwlock->wait_ex_waiters),
(ulong) rwlock->waiters,
rwlock->last_s_file_name,
(ulong) rwlock->last_s_line,
rwlock->last_x_file_name,
......@@ -842,15 +844,11 @@ sync_array_object_signalled(
/*========================*/
sync_array_t* arr) /* in: wait array */
{
#ifdef HAVE_GCC_ATOMIC_BUILTINS
__sync_fetch_and_add(&(arr->sg_count),1);
#else
sync_array_enter(arr);
arr->sg_count++;
sync_array_exit(arr);
#endif
}
/**************************************************************************
......@@ -891,23 +889,19 @@ sync_arr_wake_threads_if_sema_free(void)
mutex = cell->wait_object;
os_event_set(mutex->event);
#ifdef __WIN__
} else if (cell->request_type
== RW_LOCK_WAIT_EX) {
rw_lock_t* lock;
lock = cell->wait_object;
os_event_set(lock->wait_ex_event);
} else if (cell->request_type
== RW_LOCK_SHARED) {
rw_lock_t* lock;
lock = cell->wait_object;
os_event_set(lock->s_event);
#endif
} else {
rw_lock_t* lock;
rw_lock_t* lock;
lock = cell->wait_object;
os_event_set(lock->x_event);
os_event_set(lock->event);
}
}
}
......
......@@ -119,7 +119,6 @@ rw_lock_create_func(
/* If this is the very first time a synchronization object is
created, then the following call initializes the sync system. */
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);
lock->mutex.cfile_name = cfile_name;
......@@ -129,14 +128,8 @@ rw_lock_create_func(
lock->mutex.cmutex_name = cmutex_name;
lock->mutex.mutex_type = 1;
#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
#endif /* !HAVE_GCC_ATOMIC_BUILTINS */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
lock->lock_word = RW_LOCK_BIAS;
#endif
rw_lock_set_s_waiters(lock, 0);
rw_lock_set_x_waiters(lock, 0);
rw_lock_set_wx_waiters(lock, 0);
rw_lock_set_waiters(lock, 0);
rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
lock->writer_count = 0;
rw_lock_set_reader_count(lock, 0);
......@@ -158,9 +151,11 @@ rw_lock_create_func(
lock->last_x_file_name = "not yet reserved";
lock->last_s_line = 0;
lock->last_x_line = 0;
lock->s_event = os_event_create(NULL);
lock->x_event = os_event_create(NULL);
lock->event = os_event_create(NULL);
#ifdef __WIN__
lock->wait_ex_event = os_event_create(NULL);
#endif
mutex_enter(&rw_lock_list_mutex);
......@@ -186,21 +181,19 @@ rw_lock_free(
{
ut_ad(rw_lock_validate(lock));
ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
ut_a(rw_lock_get_s_waiters(lock) == 0);
ut_a(rw_lock_get_x_waiters(lock) == 0);
ut_a(rw_lock_get_wx_waiters(lock) == 0);
ut_a(rw_lock_get_waiters(lock) == 0);
ut_a(rw_lock_get_reader_count(lock) == 0);
lock->magic_n = 0;
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_free(rw_lock_get_mutex(lock));
#endif
mutex_enter(&rw_lock_list_mutex);
os_event_free(lock->s_event);
os_event_free(lock->x_event);
os_event_free(lock->event);
#ifdef __WIN__
os_event_free(lock->wait_ex_event);
#endif
if (UT_LIST_GET_PREV(list, lock)) {
ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
......@@ -218,8 +211,6 @@ rw_lock_free(
/**********************************************************************
Checks that the rw-lock has been initialized and that there are no
simultaneous shared and exclusive locks. */
/* MEMO: If HAVE_GCC_ATOMIC_BUILTINS, we should use this function statically. */
UNIV_INTERN
ibool
rw_lock_validate(
......@@ -228,9 +219,7 @@ rw_lock_validate(
{
ut_a(lock);
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(rw_lock_get_mutex(lock));
#endif
ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
ut_a((rw_lock_get_reader_count(lock) == 0)
......@@ -238,17 +227,11 @@ rw_lock_validate(
ut_a((rw_lock_get_writer(lock) == RW_LOCK_EX)
|| (rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX)
|| (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED));
ut_a((rw_lock_get_s_waiters(lock) == 0)
|| (rw_lock_get_s_waiters(lock) == 1));
ut_a((rw_lock_get_x_waiters(lock) == 0)
|| (rw_lock_get_x_waiters(lock) == 1));
ut_a((rw_lock_get_wx_waiters(lock) == 0)
|| (rw_lock_get_wx_waiters(lock) == 1));
ut_a((rw_lock_get_waiters(lock) == 0)
|| (rw_lock_get_waiters(lock) == 1));
ut_a((lock->writer != RW_LOCK_EX) || (lock->writer_count > 0));
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
return(TRUE);
}
......@@ -275,14 +258,13 @@ rw_lock_s_lock_spin(
ut_ad(rw_lock_validate(lock));
lock_loop:
i = 0;
spin_loop:
rw_s_spin_wait_count++;
/* Spin waiting for the writer field to become free */
i = 0;
while (i < SYNC_SPIN_ROUNDS
&& rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
&& i < SYNC_SPIN_ROUNDS) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
}
......@@ -303,27 +285,15 @@ spin_loop:
lock->cfile_name, (ulong) lock->cline, (ulong) i);
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(rw_lock_get_mutex(lock));
#endif
/* We try once again to obtain the lock */
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
return; /* Success */
} else {
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* as sync0sync.c does */
i++;
if (i < SYNC_SPIN_ROUNDS) {
goto spin_loop;
}
#endif
/* If we get here, locking did not succeed, we may
suspend the thread to wait in the wait array */
......@@ -334,19 +304,9 @@ spin_loop:
file_name, line,
&index);
rw_lock_set_s_waiters(lock, 1);
rw_lock_set_waiters(lock, 1);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* as sync0sync.c does */
for (i = 0; i < 4; i++) {
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
sync_array_free_cell(sync_primary_wait_array, index);
return; /* Success */
}
}
#else
mutex_exit(rw_lock_get_mutex(lock));
#endif
if (srv_print_latch_waits) {
fprintf(stderr,
......@@ -383,19 +343,13 @@ rw_lock_x_lock_move_ownership(
{
ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(&(lock->mutex));
#endif
lock->writer_thread = os_thread_get_curr_id();
lock->pass = 0;
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#else
__sync_synchronize();
#endif
}
/**********************************************************************
......@@ -413,89 +367,6 @@ rw_lock_x_lock_low(
const char* file_name,/* in: file name where lock requested */
ulint line) /* in: line where requested */
{
#ifdef HAVE_GCC_ATOMIC_BUILTINS
os_thread_id_t curr_thread = os_thread_get_curr_id();
/* try to lock writer */
if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
== RW_LOCK_NOT_LOCKED) {
/* success */
/* obtain RW_LOCK_WAIT_EX right */
lock->writer_thread = curr_thread;
lock->pass = pass;
lock->writer_is_wait_ex = TRUE;
/* an atomic operation may be safer with respect to memory ordering. */
__sync_synchronize();
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
file_name, line);
#endif
}
if (!os_thread_eq(lock->writer_thread, curr_thread)) {
return(RW_LOCK_NOT_LOCKED);
}
switch(rw_lock_get_writer(lock)) {
case RW_LOCK_WAIT_EX:
/* have right to try x-lock */
if (lock->lock_word == RW_LOCK_BIAS) {
/* try x-lock */
if(__sync_sub_and_fetch(&(lock->lock_word),
RW_LOCK_BIAS) == 0) {
/* success */
lock->pass = pass;
lock->writer_is_wait_ex = FALSE;
__sync_fetch_and_add(&(lock->writer_count),1);
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, pass, RW_LOCK_WAIT_EX);
rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
file_name, line);
#endif
lock->last_x_file_name = file_name;
lock->last_x_line = line;
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
} else {
/* fail */
__sync_fetch_and_add(&(lock->lock_word),
RW_LOCK_BIAS);
}
}
/* There are readers, we have to wait */
return(RW_LOCK_WAIT_EX);
break;
case RW_LOCK_EX:
/* already have x-lock */
if ((lock->pass == 0)&&(pass == 0)) {
__sync_fetch_and_add(&(lock->writer_count),1);
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name,
line);
#endif
lock->last_x_file_name = file_name;
lock->last_x_line = line;
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
}
return(RW_LOCK_NOT_LOCKED);
break;
default: /* ??? */
return(RW_LOCK_NOT_LOCKED);
}
#else /* HAVE_GCC_ATOMIC_BUILTINS */
ut_ad(mutex_own(rw_lock_get_mutex(lock)));
if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
......@@ -576,7 +447,6 @@ rw_lock_x_lock_low(
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
}
#endif /* HAVE_GCC_ATOMIC_BUILTINS */
/* Locking did not succeed */
return(RW_LOCK_NOT_LOCKED);
......@@ -602,33 +472,19 @@ rw_lock_x_lock_func(
ulint line) /* in: line where requested */
{
ulint index; /* index of the reserved wait cell */
ulint state = RW_LOCK_NOT_LOCKED; /* lock state acquired */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
ulint prev_state = RW_LOCK_NOT_LOCKED;
#endif
ulint state; /* lock state acquired */
ulint i; /* spin round count */
ut_ad(rw_lock_validate(lock));
lock_loop:
i = 0;
#ifdef HAVE_GCC_ATOMIC_BUILTINS
prev_state = state;
#else
/* Acquire the mutex protecting the rw-lock fields */
mutex_enter_fast(&(lock->mutex));
#endif
state = rw_lock_x_lock_low(lock, pass, file_name, line);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (state != prev_state) i=0; /* if progress, reset counter. */
#else
mutex_exit(&(lock->mutex));
#endif
spin_loop:
if (state == RW_LOCK_EX) {
return; /* Locking succeeded */
......@@ -636,9 +492,10 @@ spin_loop:
} else if (state == RW_LOCK_NOT_LOCKED) {
/* Spin waiting for the writer field to become free */
i = 0;
while (i < SYNC_SPIN_ROUNDS
&& rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
&& i < SYNC_SPIN_ROUNDS) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0,
srv_spin_wait_delay));
......@@ -652,12 +509,9 @@ spin_loop:
} else if (state == RW_LOCK_WAIT_EX) {
/* Spin waiting for the reader count field to become zero */
i = 0;
#ifdef HAVE_GCC_ATOMIC_BUILTINS
while (lock->lock_word != RW_LOCK_BIAS
#else
while (rw_lock_get_reader_count(lock) != 0
#endif
&& i < SYNC_SPIN_ROUNDS) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0,
......@@ -670,6 +524,7 @@ spin_loop:
os_thread_yield();
}
} else {
i = 0; /* Eliminate a compiler warning */
ut_error;
}
......@@ -686,69 +541,34 @@ spin_loop:
/* We try once again to obtain the lock. Acquire the mutex protecting
the rw-lock fields */
#ifdef HAVE_GCC_ATOMIC_BUILTINS
prev_state = state;
#else
mutex_enter(rw_lock_get_mutex(lock));
#endif
state = rw_lock_x_lock_low(lock, pass, file_name, line);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
if (state != prev_state) i=0; /* if progress, reset counter. */
#endif
if (state == RW_LOCK_EX) {
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
return; /* Locking succeeded */
}
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* as sync0sync.c does */
i++;
if (i < SYNC_SPIN_ROUNDS) {
goto spin_loop;
}
#endif
rw_x_system_call_count++;
sync_array_reserve_cell(sync_primary_wait_array,
lock,
#ifdef __WIN__
/* On windows RW_LOCK_WAIT_EX signifies
that this thread should wait on the
special wait_ex_event. */
(state == RW_LOCK_WAIT_EX)
? RW_LOCK_WAIT_EX :
#endif
RW_LOCK_EX,
file_name, line,
&index);
if (state == RW_LOCK_WAIT_EX) {
rw_lock_set_wx_waiters(lock, 1);
} else {
rw_lock_set_x_waiters(lock, 1);
}
rw_lock_set_waiters(lock, 1);
#ifdef HAVE_GCC_ATOMIC_BUILTINS
/* as sync0sync.c does */
for (i = 0; i < 4; i++) {
prev_state = state;
state = rw_lock_x_lock_low(lock, pass, file_name, line);
if (state == RW_LOCK_EX) {
sync_array_free_cell(sync_primary_wait_array, index);
return; /* Locking succeeded */
}
if (state != prev_state) {
/* retry! */
sync_array_free_cell(sync_primary_wait_array, index);
goto lock_loop;
}
}
#else
mutex_exit(rw_lock_get_mutex(lock));
#endif
if (srv_print_latch_waits) {
fprintf(stderr,
......@@ -910,9 +730,7 @@ rw_lock_own(
ut_ad(lock);
ut_ad(rw_lock_validate(lock));
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(&(lock->mutex));
#endif
info = UT_LIST_GET_FIRST(lock->debug_list);
......@@ -922,9 +740,7 @@ rw_lock_own(
&& (info->pass == 0)
&& (info->lock_type == lock_type)) {
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#endif
/* Found! */
return(TRUE);
......@@ -932,9 +748,7 @@ rw_lock_own(
info = UT_LIST_GET_NEXT(list, info);
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#endif
return(FALSE);
}
......@@ -956,25 +770,21 @@ rw_lock_is_locked(
ut_ad(lock);
ut_ad(rw_lock_validate(lock));
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(&(lock->mutex));
#endif
if (lock_type == RW_LOCK_SHARED) {
if (lock->reader_count > 0) {
ret = TRUE;
}
} else if (lock_type == RW_LOCK_EX) {
if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
if (lock->writer == RW_LOCK_EX) {
ret = TRUE;
}
} else {
ut_error;
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#endif
return(ret);
}
......@@ -1004,26 +814,16 @@ rw_lock_list_print_info(
count++;
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(&(lock->mutex));
#endif
if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
|| (rw_lock_get_reader_count(lock) != 0)
|| (rw_lock_get_s_waiters(lock) != 0)
|| (rw_lock_get_x_waiters(lock) != 0)
|| (rw_lock_get_wx_waiters(lock) != 0)) {
|| (rw_lock_get_waiters(lock) != 0)) {
fprintf(file, "RW-LOCK: %p ", (void*) lock);
if (rw_lock_get_s_waiters(lock)) {
fputs(" s_waiters for the lock exist,", file);
}
if (rw_lock_get_x_waiters(lock)) {
fputs(" x_waiters for the lock exist\n", file);
}
if (rw_lock_get_wx_waiters(lock)) {
fputs(" wait_ex_waiters for the lock exist\n", file);
if (rw_lock_get_waiters(lock)) {
fputs(" Waiters for the lock exist\n", file);
} else {
putc('\n', file);
}
......@@ -1035,9 +835,7 @@ rw_lock_list_print_info(
}
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(&(lock->mutex));
#endif
lock = UT_LIST_GET_NEXT(list, lock);
}
......@@ -1062,18 +860,10 @@ rw_lock_print(
if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
|| (rw_lock_get_reader_count(lock) != 0)
|| (rw_lock_get_s_waiters(lock) != 0)
|| (rw_lock_get_x_waiters(lock) != 0)
|| (rw_lock_get_wx_waiters(lock) != 0)) {
|| (rw_lock_get_waiters(lock) != 0)) {
if (rw_lock_get_s_waiters(lock)) {
fputs(" s_waiters for the lock exist,", stderr);
}
if (rw_lock_get_x_waiters(lock)) {
fputs(" x_waiters for the lock exist\n", stderr);
}
if (rw_lock_get_wx_waiters(lock)) {
fputs(" wait_ex_waiters for the lock exist\n", stderr);
if (rw_lock_get_waiters(lock)) {
fputs(" Waiters for the lock exist\n", stderr);
} else {
putc('\n', stderr);
}
......@@ -1132,18 +922,14 @@ rw_lock_n_locked(void)
lock = UT_LIST_GET_FIRST(rw_lock_list);
while (lock != NULL) {
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_enter(rw_lock_get_mutex(lock));
#endif
if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
|| (rw_lock_get_reader_count(lock) != 0)) {
count++;
}
#ifndef HAVE_GCC_ATOMIC_BUILTINS
mutex_exit(rw_lock_get_mutex(lock));
#endif
lock = UT_LIST_GET_NEXT(list, lock);
}
......
......@@ -32,7 +32,6 @@ static mutex_t thr_local_mutex;
/* The hash table. The module is not yet initialized when it is NULL. */
static hash_table_t* thr_local_hash = NULL;
ulint thr_local_hash_n_nodes = 0;
/* The private data for each thread should be put to
the structure below and the accessor functions written
......@@ -178,7 +177,6 @@ thr_local_create(void)
os_thread_pf(os_thread_get_curr_id()),
local);
thr_local_hash_n_nodes++;
mutex_exit(&thr_local_mutex);
}
......@@ -206,7 +204,6 @@ thr_local_free(
HASH_DELETE(thr_local_t, hash, thr_local_hash,
os_thread_pf(id), local);
thr_local_hash_n_nodes--;
mutex_exit(&thr_local_mutex);
......@@ -229,29 +226,3 @@ thr_local_init(void)
mutex_create(&thr_local_mutex, SYNC_THR_LOCAL);
}
/*************************************************************************
Return local hash table informations. */
ulint
thr_local_hash_cells(void)
/*======================*/
{
if (thr_local_hash) {
return (thr_local_hash->n_cells);
} else {
return 0;
}
}
ulint
thr_local_hash_nodes(void)
/*======================*/
{
if (thr_local_hash) {
return (thr_local_hash_n_nodes
* (sizeof(thr_local_t) + MEM_BLOCK_HEADER_SIZE));
} else {
return 0;
}
}