Commit 76e929a9 authored by unknown

MDEV-4984: Implement MASTER_GTID_WAIT() and @@LAST_GTID.

Rewrite the gtid_waiting::wait_for_gtid() function.
The code was rubbish (and buggy). Now the logic is
much clearer.

Also fix a missing slave sync that could cause test failure.
parent 3c97d24f
@@ -131,6 +131,7 @@ include/stop_slave.inc
CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_3;
include/start_slave.inc
DROP TABLE t1,t2;
include/save_master_gtid.inc
*** A few more checks for BINLOG_GTID_POS function ***
SELECT BINLOG_GTID_POS();
ERROR 42000: Incorrect parameter count in the call to native function 'BINLOG_GTID_POS'
@@ -167,6 +168,7 @@ NULL
Warnings:
Warning 1916 Got overflow when converting '18446744073709551616' to INT. Value truncated.
*** Some tests of @@GLOBAL.gtid_binlog_state ***
include/sync_with_master_gtid.inc
include/stop_slave.inc
SET @old_state= @@GLOBAL.gtid_binlog_state;
SET GLOBAL gtid_binlog_state = '';
@@ -139,6 +139,7 @@ eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3;
connection server_1;
DROP TABLE t1,t2;
--source include/save_master_gtid.inc
--echo *** A few more checks for BINLOG_GTID_POS function ***
--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1)
@@ -160,6 +161,7 @@ eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616);
--echo *** Some tests of @@GLOBAL.gtid_binlog_state ***
--connection server_2
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--connection server_1
This diff is collapsed.
@@ -40,6 +40,57 @@ enum enum_gtid_skip_type {
};
/*
  Structure to keep track of threads waiting in MASTER_GTID_WAIT().
  Since replication is (mostly) single-threaded, we want to minimise the
  performance impact on that from MASTER_GTID_WAIT(). To achieve this, we
  are careful to keep the common lock between replication threads and
  MASTER_GTID_WAIT threads held for as short as possible. We keep only
  a single thread waiting to be notified by the replication threads; this
  thread then handles all the (potentially heavy) lifting of dealing with
  all current waiting threads.
*/
struct gtid_waiting {
  /* Elements in the hash, basically a priority queue for each domain. */
  struct hash_element {
    QUEUE queue;
    uint32 domain_id;
  };
  /* A priority queue to handle waiters in one domain in seq_no order. */
  struct queue_element {
    uint64 wait_seq_no;
    THD *thd;
    int queue_idx;
    /*
      do_small_wait is true if we have responsibility for ensuring that there
      is a small waiter.
    */
    bool do_small_wait;
    /*
      The flag `done' is set when the wait is completed (either due to reaching
      the position waited for, or due to timeout or kill). The queue_element
      is in the queue if and only if `done' is false.
    */
    bool done;
  };
  mysql_mutex_t LOCK_gtid_waiting;
  HASH hash;
  void init();
  void destroy();
  hash_element *get_entry(uint32 domain_id);
  int wait_for_pos(THD *thd, String *gtid_str, longlong timeout_us);
  void promote_new_waiter(gtid_waiting::hash_element *he);
  int wait_for_gtid(THD *thd, rpl_gtid *wait_gtid, struct timespec *wait_until);
  void process_wait_hash(uint64 wakeup_seq_no, gtid_waiting::hash_element *he);
  int register_in_wait_queue(THD *thd, rpl_gtid *wait_gtid, hash_element *he,
                             queue_element *elem);
  void remove_from_wait_queue(hash_element *he, queue_element *elem);
};
/*
  Replication slave state.
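The rewritten waiting logic itself lives in the collapsed diff noted above, so it is not visible on this page. As an editorial illustration only, the scheme described in the comment at the top of struct gtid_waiting (a single designated "small waiter" sleeps on the condition variable the replication thread signals, then does the heavy lifting of completing every waiter whose position has been reached and of promoting a successor) can be modelled in standalone standard C++ roughly as follows. Every name in this sketch (Waiter, Domain, produce(), wait_for()) is invented for the illustration and is not a MariaDB API; timeouts, KILL handling and error paths are omitted.

// Standalone illustration only, not MariaDB code. Models the
// "single small waiter" scheme: the producer wakes at most one thread.
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>

struct Waiter {
  uint64_t wait_seq_no;              // position this thread is waiting for
  bool done= false;                  // set once the wait is satisfied
  std::condition_variable wakeup;    // private wakeup for non-designated waiters
};

struct Domain {
  std::mutex lock;                         // the common lock (LOCK_gtid_waiting analogue)
  std::condition_variable producer_cond;   // the only condvar the producer signals
  uint64_t reached= 0;                     // highest seq_no applied so far
  std::multimap<uint64_t, Waiter*> queue;  // waiters ordered by wait_seq_no
  Waiter *small_waiter= nullptr;           // the one thread the producer will wake
};

// Producer (replication) side: a cheap comparison and at most one notify.
void produce(Domain &d, uint64_t seq_no)
{
  std::lock_guard<std::mutex> g(d.lock);
  d.reached= seq_no;
  if (d.small_waiter && d.small_waiter->wait_seq_no <= seq_no)
  {
    d.small_waiter= nullptr;               // hand over responsibility to the woken thread
    d.producer_cond.notify_one();
  }
}

// Waiter side: block until seq_no has been produced in this domain.
void wait_for(Domain &d, uint64_t seq_no)
{
  Waiter me;
  me.wait_seq_no= seq_no;
  std::unique_lock<std::mutex> g(d.lock);
  if (d.reached >= seq_no)
    return;                                // already reached, nothing to wait for
  d.queue.emplace(seq_no, &me);
  while (!me.done)
  {
    if (!d.small_waiter)
    {
      // Become the designated small waiter and sleep on the producer's condvar.
      d.small_waiter= &me;
      while (d.small_waiter == &me && d.reached < me.wait_seq_no)
        d.producer_cond.wait(g);
      if (d.small_waiter == &me)
        d.small_waiter= nullptr;
      // Heavy lifting: complete every waiter whose position was reached.
      while (!d.queue.empty() && d.queue.begin()->first <= d.reached)
      {
        Waiter *w= d.queue.begin()->second;
        d.queue.erase(d.queue.begin());
        w->done= true;
        if (w != &me)
          w->wakeup.notify_one();
      }
      // Promote: wake the smallest remaining waiter so it can take over the role.
      if (!d.small_waiter && !d.queue.empty())
        d.queue.begin()->second->wakeup.notify_one();
    }
    else
    {
      // Someone else is the small waiter; sleep until completed or promoted.
      me.wakeup.wait(g);
    }
  }
}

The producer side only compares one sequence number and issues at most one wakeup; the expensive queue processing is paid by the MASTER_GTID_WAIT() threads themselves, which is the point of the design described in the comment.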
@@ -68,9 +119,14 @@ struct rpl_slave_state
    /* Highest seq_no seen so far in this domain. */
    uint64 highest_seq_no;
    /*
      If min_wait_seq_no is non-zero, then it is the smallest seq_no in this
      domain that someone is doing MASTER_GTID_WAIT() on. When we reach this
      seq_no, we need to signal the waiter on COND_wait_gtid.
      If this is non-NULL, then it is the waiter responsible for the small
      wait in MASTER_GTID_WAIT().
    */
    gtid_waiting::queue_element *gtid_waiter;
    /*
      If gtid_waiter is non-NULL, then this is the seq_no that its
      MASTER_GTID_WAIT() is waiting on. When we reach this seq_no, we need to
      signal the waiter on COND_wait_gtid.
    */
    uint64 min_wait_seq_no;
    mysql_cond_t COND_wait_gtid;
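The comment on min_wait_seq_no above is the whole contract for the replication side: once a domain reaches the designated waiter's seq_no, signal it on COND_wait_gtid and let it take over. The actual check is in the collapsed implementation diff; the following is only a hedged sketch of its shape, where the per-domain element type name (written here as rpl_slave_state::element) and the helper name are assumptions, and only members shown in the hunk above are referenced.

// Hypothetical sketch, not taken from this commit. Assumes `e` is the
// per-domain element of rpl_slave_state shown in the hunk above and that
// the caller already holds the mutex protecting it.
static void signal_gtid_waiter_if_reached(rpl_slave_state::element *e,
                                          uint64 seq_no)
{
  if (e->gtid_waiter && e->min_wait_seq_no <= seq_no)
  {
    /*
      The designated small waiter has reached (or passed) its wait position.
      Clear it and wake it up; the woken MASTER_GTID_WAIT() thread then does
      the heavier processing of the whole wait queue for this domain, so the
      replication thread itself stays cheap.
    */
    e->gtid_waiter= NULL;
    mysql_cond_broadcast(&e->COND_wait_gtid);
  }
}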
@@ -215,48 +271,6 @@ struct slave_connection_state
};
/*
  Structure to keep track of threads waiting in MASTER_GTID_WAIT().
  Since replication is (mostly) single-threaded, we want to minimise the
  performance impact on that from MASTER_GTID_WAIT(). To achieve this, we
  are careful to keep the common lock between replication threads and
  MASTER_GTID_WAIT threads held for as short as possible. We keep only
  a single thread waiting to be notified by the replication threads; this
  thread then handles all the (potentially heavy) lifting of dealing with
  all current waiting threads.
*/
struct gtid_waiting {
  /* Elements in the hash, basically a priority queue for each domain. */
  struct hash_element {
    QUEUE queue;
    uint32 domain_id;
  };
  /* A priority queue to handle waiters in one domain in seq_no order. */
  struct queue_element {
    uint64 wait_seq_no;
    THD *thd;
    int queue_idx;
    enum { DONE, TAKEOVER } wakeup_reason;
  };
  mysql_mutex_t LOCK_gtid_waiting;
  HASH hash;
  void init();
  void destroy();
  hash_element *get_entry(uint32 domain_id);
  int wait_for_pos(THD *thd, String *gtid_str, longlong timeout_us);
  void promote_new_waiter(gtid_waiting::hash_element *he);
  int wait_for_gtid(THD *thd, rpl_gtid *wait_gtid, struct timespec *wait_until);
  void process_wait_hash(uint64 wakeup_seq_no, gtid_waiting::hash_element *he);
  hash_element *register_in_wait_hash(THD *thd, rpl_gtid *wait_gtid,
                                      queue_element *elem);
  void remove_from_wait_hash(hash_element *e, queue_element *elem);
};
extern bool rpl_slave_state_tostring_helper(String *dest, const rpl_gtid *gtid,
                                            bool *first);
extern int gtid_check_rpl_slave_state_table(TABLE *table);