Kirill Smelkov / mariadb · Commits · 86f2f582

Commit 86f2f582
authored Jun 16, 2006 by petr@outpost.site

    Merge pchardin@bk-internal.mysql.com:/home/bk/mysql-5.1
    into  mysql.com:/home/cps/mysql/devel/5.1-csv-remove-mmap

parents 1327ca01 a3c4c13f

Showing 14 changed files with 1087 additions and 714 deletions (+1087, -714)
mysql-test/r/csv.result        +2    -2
sql/ha_ndbcluster_binlog.cc    +2    -2
sql/log.cc                     +610  -446
sql/log.h                      +81   -52
sql/log_event.h                +1    -1
sql/mysql_priv.h               +1    -1
sql/mysqld.cc                  +3    -3
sql/rpl_injector.cc            +1    -1
sql/rpl_injector.h             +2    -2
sql/rpl_rli.h                  +1    -1
sql/slave.cc                   +4    -3
sql/slave.h                    +2    -2
storage/csv/ha_tina.cc         +319  -193
storage/csv/ha_tina.h          +58   -5
mysql-test/r/csv.result

@@ -4944,10 +4944,10 @@ val
 UPDATE bug13894 SET val=6 WHERE val=10;
 SELECT * FROM bug13894;
 val
-5
-11
 6
 6
+5
+11
 DROP TABLE bug13894;
 DROP TABLE IF EXISTS bug14672;
 CREATE TABLE bug14672 (c1 integer) engine = CSV;
sql/ha_ndbcluster_binlog.cc

@@ -453,7 +453,7 @@ static void ndbcluster_binlog_wait(THD *thd)
 }

 /*
-  Called from MYSQL_LOG::reset_logs in log.cc when binlog is emptied
+  Called from MYSQL_BIN_LOG::reset_logs in log.cc when binlog is emptied
 */
 static int ndbcluster_reset_logs(THD *thd)
 {

@@ -477,7 +477,7 @@ static int ndbcluster_reset_logs(THD *thd)
 }

 /*
-  Called from MYSQL_LOG::purge_logs in log.cc when the binlog "file"
+  Called from MYSQL_BIN_LOG::purge_logs in log.cc when the binlog "file"
   is removed
 */
sql/log.cc

@@ -42,7 +42,7 @@ extern char *opt_logname, *opt_slow_logname;
 LOGGER logger;

-MYSQL_LOG mysql_bin_log;
+MYSQL_BIN_LOG mysql_bin_log;
 ulong sync_binlog_counter= 0;

 static bool test_if_number(const char *str,

@@ -86,8 +86,8 @@ handlerton binlog_hton;
   SYNOPSIS
     open_log_table()
-    log_type          type of the log table to open: QUERY_LOG_GENERAL
-                      or QUERY_LOG_SLOW
+    log_table_type    type of the log table to open: QUERY_LOG_GENERAL
+                      or QUERY_LOG_SLOW

   DESCRIPTION

@@ -102,14 +102,14 @@ handlerton binlog_hton;
     TRUE - error occured
 */

-bool Log_to_csv_event_handler::open_log_table(uint log_type)
+bool Log_to_csv_event_handler::open_log_table(uint log_table_type)
 {
   THD *log_thd, *curr= current_thd;
   TABLE_LIST *table;
   bool error= FALSE;
   DBUG_ENTER("open_log_table");

-  switch (log_type) {
+  switch (log_table_type) {
   case QUERY_LOG_GENERAL:
     log_thd= general_log_thd;
     table= &general_log;

@@ -222,8 +222,8 @@ Log_to_csv_event_handler::~Log_to_csv_event_handler()
   SYNOPSIS
     reopen_log_table()
-    log_type          type of the log table to open: QUERY_LOG_GENERAL
-                      or QUERY_LOG_SLOW
+    log_table_type    type of the log table to open: QUERY_LOG_GENERAL
+                      or QUERY_LOG_SLOW

   DESCRIPTION

@@ -240,12 +240,12 @@ Log_to_csv_event_handler::~Log_to_csv_event_handler()
     TRUE - open_log_table() returned an error
 */

-bool Log_to_csv_event_handler::reopen_log_table(uint log_type)
+bool Log_to_csv_event_handler::reopen_log_table(uint log_table_type)
 {
   /* don't open the log table, if it wasn't enabled during startup */
   if (!logger.is_log_tables_initialized)
     return FALSE;
-  return open_log_table(log_type);
+  return open_log_table(log_table_type);
 }

 void Log_to_csv_event_handler::cleanup()

@@ -525,9 +525,8 @@ void Log_to_file_event_handler::cleanup()
 void Log_to_file_event_handler::flush()
 {
   /* reopen log files */
-  mysql_log.new_file(1);
-  mysql_slow_log.new_file(1);
+  mysql_log.reopen_file();
+  mysql_slow_log.reopen_file();
 }

 /*

@@ -582,9 +581,9 @@ void LOGGER::cleanup_end()
 }

-void LOGGER::close_log_table(uint log_type, bool lock_in_use)
+void LOGGER::close_log_table(uint log_table_type, bool lock_in_use)
 {
-  table_log_handler->close_log_table(log_type, lock_in_use);
+  table_log_handler->close_log_table(log_table_type, lock_in_use);
 }

@@ -624,9 +623,9 @@ void LOGGER::init_log_tables()
 }

-bool LOGGER::reopen_log_table(uint log_type)
+bool LOGGER::reopen_log_table(uint log_table_type)
 {
-  return table_log_handler->reopen_log_table(log_type);
+  return table_log_handler->reopen_log_table(log_table_type);
 }

@@ -961,9 +960,9 @@ int LOGGER::set_handlers(uint error_log_printer,
   SYNOPSIS
     close_log_table()
-    log_type          type of the log table to close: QUERY_LOG_GENERAL
-                      or QUERY_LOG_SLOW
-    lock_in_use       Set to TRUE if the caller owns LOCK_open. FALSE otherwise.
+    log_table_type    type of the log table to close: QUERY_LOG_GENERAL
+                      or QUERY_LOG_SLOW
+    lock_in_use       Set to TRUE if the caller owns LOCK_open. FALSE otherwise.

   DESCRIPTION

@@ -973,7 +972,7 @@ int LOGGER::set_handlers(uint error_log_printer,
 */
 void Log_to_csv_event_handler::
-  close_log_table(uint log_type, bool lock_in_use)
+  close_log_table(uint log_table_type, bool lock_in_use)
 {
   THD *log_thd, *curr= current_thd;
   TABLE_LIST *table;

@@ -981,7 +980,7 @@ void Log_to_csv_event_handler::
   if (!logger.is_log_tables_initialized)
     return;                                     /* do nothing */

-  switch (log_type) {
+  switch (log_table_type) {
   case QUERY_LOG_GENERAL:
     log_thd= general_log_thd;
     table= &general_log;

@@ -1022,7 +1021,7 @@ void Log_to_csv_event_handler::
 /*
   this function is mostly a placeholder.
-  conceptually, binlog initialization (now mostly done in MYSQL_LOG::open)
+  conceptually, binlog initialization (now mostly done in MYSQL_BIN_LOG::open)
   should be moved here.
 */

@@ -1111,7 +1110,7 @@ static int binlog_prepare(THD *thd, bool all)
     do nothing.
     just pretend we can do 2pc, so that MySQL won't
     switch to 1pc.
-    real work will be done in MYSQL_LOG::log()
+    real work will be done in MYSQL_BIN_LOG::log()
   */
   return 0;
 }

@@ -1127,7 +1126,7 @@ static int binlog_commit(THD *thd, bool all)
   if (trx_data->empty())
   {
-    // we're here because trans_log was flushed in MYSQL_LOG::log()
+    // we're here because trans_log was flushed in MYSQL_BIN_LOG::log()
    DBUG_RETURN(0);
   }
   Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
@@ -1367,12 +1366,119 @@ static int find_uniq_filename(char *name)
 }

+void MYSQL_LOG::init(enum_log_type log_type_arg,
+                     enum cache_type io_cache_type_arg)
+{
+  DBUG_ENTER("MYSQL_LOG::init");
+  log_type= log_type_arg;
+  io_cache_type= io_cache_type_arg;
+  DBUG_PRINT("info",("log_type: %d", log_type));
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Open a (new) log file.
+
+  SYNOPSIS
+    open()
+    log_name            The name of the log to open
+    log_type_arg        The type of the log. E.g. LOG_NORMAL
+    new_name            The new name for the logfile. This is only needed
+                        when the method is used to open the binlog file.
+    io_cache_type_arg   The type of the IO_CACHE to use for this log file
+
+  DESCRIPTION
+    Open the logfile, init IO_CACHE and write startup messages
+    (in case of general and slow query logs).
+
+  RETURN VALUES
+    0   ok
+    1   error
+*/
+
+bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
+                     const char *new_name, enum cache_type io_cache_type_arg)
+{
+  char buff[FN_REFLEN];
+  File file= -1;
+  int open_flags= O_CREAT | O_BINARY;
+  DBUG_ENTER("MYSQL_LOG::open");
+  DBUG_PRINT("enter", ("log_type: %d", (int) log_type_arg));
+
+  write_error= 0;
+
+  init(log_type_arg, io_cache_type_arg);
+
+  if (!(name= my_strdup(log_name, MYF(MY_WME))))
+  {
+    name= (char *)log_name; // for the error message
+    goto err;
+  }
+
+  if (new_name)
+    strmov(log_file_name, new_name);
+  else if (generate_new_name(log_file_name, name))
+    goto err;
+
+  if (io_cache_type == SEQ_READ_APPEND)
+    open_flags |= O_RDWR | O_APPEND;
+  else
+    open_flags |= O_WRONLY | (log_type == LOG_BIN ? 0 : O_APPEND);
+
+  db[0]= 0;
+
+  if ((file= my_open(log_file_name, open_flags,
+                     MYF(MY_WME | ME_WAITTANG))) < 0 ||
+      init_io_cache(&log_file, file, IO_SIZE, io_cache_type,
+                    my_tell(file, MYF(MY_WME)), 0,
+                    MYF(MY_WME | MY_NABP |
+                        ((log_type == LOG_BIN) ? MY_WAIT_IF_FULL : 0))))
+    goto err;
+
+  if (log_type == LOG_NORMAL)
+  {
+    char *end;
+    int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. "
+#ifdef EMBEDDED_LIBRARY
+                        "embedded library\n", my_progname, server_version
+#elif __NT__
+                        "started with:\nTCP Port: %d, Named Pipe: %s\n",
+                        my_progname, server_version, mysqld_port,
+                        mysqld_unix_port
+#else
+                        "started with:\nTcp port: %d Unix socket: %s\n",
+                        my_progname, server_version, mysqld_port,
+                        mysqld_unix_port
+#endif
+                       );
+    end= strnmov(buff + len, "Time Id Command Argument\n",
+                 sizeof(buff) - len);
+    if (my_b_write(&log_file, (byte*) buff, (uint) (end-buff)) ||
+        flush_io_cache(&log_file))
+      goto err;
+  }
+
+  log_state= LOG_OPENED;
+  DBUG_RETURN(0);
+
+err:
+  sql_print_error("Could not use %s for logging (error %d). \
+Turning logging off for the whole duration of the MySQL server process. \
+To turn it on again: fix the cause, \
+shutdown the MySQL server and restart it.", name, errno);
+  if (file >= 0)
+    my_close(file, MYF(0));
+  end_io_cache(&log_file);
+  safeFree(name);
+  log_state= LOG_CLOSED;
+  DBUG_RETURN(1);
+}
+
 MYSQL_LOG::MYSQL_LOG()
-  :bytes_written(0), last_time(0), query_start(0), name(0),
-   prepared_xids(0), log_type(LOG_CLOSED), file_id(1), open_count(1),
-   write_error(FALSE), inited(FALSE), need_start_event(TRUE),
-   m_table_map_version(0),
-   description_event_for_exec(0), description_event_for_queue(0)
+  : name(0), log_type(LOG_UNKNOWN), log_state(LOG_CLOSED), write_error(FALSE),
+    inited(FALSE)
 {
   /*
     We don't want to initialize LOCK_Log here as such initialization depends on

@@ -1380,9 +1486,54 @@ MYSQL_LOG::MYSQL_LOG()
     called only in main(). Doing initialization here would make it happen
     before main().
   */
-  index_file_name[0] = 0;
-  bzero((char*) &log_file, sizeof(log_file));
-  bzero((char*) &index_file, sizeof(index_file));
+  bzero((char*) &log_file, sizeof(log_file));
 }

+void MYSQL_LOG::init_pthread_objects()
+{
+  DBUG_ASSERT(inited == 0);
+  inited= 1;
+  (void) pthread_mutex_init(&LOCK_log, MY_MUTEX_INIT_SLOW);
+}
+
+/*
+  Close the log file
+
+  SYNOPSIS
+    close()
+    exiting     Bitmask. For the slow and general logs the only used bit is
+                LOG_CLOSE_TO_BE_OPENED. This is used if we intend to call
+                open at once after close.
+
+  NOTES
+    One can do an open on the object at once after doing a close.
+    The internal structures are not freed until cleanup() is called
+*/
+
+void MYSQL_LOG::close(uint exiting)
+{                                       // One can't set log_type here!
+  DBUG_ENTER("MYSQL_LOG::close");
+  DBUG_PRINT("enter",("exiting: %d", (int) exiting));
+  if (log_state == LOG_OPENED)
+  {
+    end_io_cache(&log_file);
+
+    if (my_sync(log_file.file, MYF(MY_WME)) && ! write_error)
+    {
+      write_error= 1;
+      sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+    }
+
+    if (my_close(log_file.file, MYF(MY_WME)) && ! write_error)
+    {
+      write_error= 1;
+      sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+    }
+  }
+
+  log_state= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
+  safeFree(name);
+  DBUG_VOID_RETURN;
+}
+
 /* this is called only once */

@@ -1393,12 +1544,8 @@ void MYSQL_LOG::cleanup()
   if (inited)
   {
     inited= 0;
-    close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
-    delete description_event_for_queue;
-    delete description_event_for_exec;
-    (void) pthread_mutex_destroy(&LOCK_log);
-    (void) pthread_mutex_destroy(&LOCK_index);
-    (void) pthread_cond_destroy(&update_cond);
+    close(0);
   }
   DBUG_VOID_RETURN;
 }

@@ -1406,8 +1553,8 @@ void MYSQL_LOG::cleanup()
 int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
 {
-  fn_format(new_name, log_name, mysql_data_home, "", 4);
+  if (log_type != LOG_NORMAL)
+    fn_format(new_name, log_name, mysql_data_home, "", 4);
   if (log_type == LOG_BIN)
   {
     if (!fn_ext(log_name)[0])
     {
@@ -1422,33 +1569,287 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
 }

-void MYSQL_LOG::init(enum_log_type log_type_arg,
-                     enum cache_type io_cache_type_arg,
-                     bool no_auto_events_arg,
-                     ulong max_size_arg)
-{
-  DBUG_ENTER("MYSQL_LOG::init");
-  log_type = log_type_arg;
-  io_cache_type = io_cache_type_arg;
-  no_auto_events = no_auto_events_arg;
-  max_size = max_size_arg;
-  DBUG_PRINT("info",("log_type: %d max_size: %lu", log_type, max_size));
-  DBUG_VOID_RETURN;
-}
-
-void MYSQL_LOG::init_pthread_objects()
-{
-  DBUG_ASSERT(inited == 0);
-  inited= 1;
-  (void) pthread_mutex_init(&LOCK_log, MY_MUTEX_INIT_SLOW);
-  (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
-  (void) pthread_cond_init(&update_cond, 0);
-}
+/*
+  Reopen the log file
+
+  SYNOPSIS
+    reopen_file()
+
+  DESCRIPTION
+    Reopen the log file. The method is used during FLUSH LOGS
+    and locks LOCK_log mutex
+*/
+
+void MYSQL_QUERY_LOG::reopen_file()
+{
+  char *save_name;
+  DBUG_ENTER("MYSQL_LOG::reopen_file");
+  if (!is_open())
+  {
+    DBUG_PRINT("info",("log is closed"));
+    DBUG_VOID_RETURN;
+  }
+
+  pthread_mutex_lock(&LOCK_log);
+
+  save_name= name;
+  name= 0;                              // Don't free name
+  close(LOG_CLOSE_TO_BE_OPENED);
+
+  /*
+     Note that at this point, log_state != LOG_CLOSED (important for is_open()).
+  */
+
+  open(save_name, log_type, 0, io_cache_type);
+  my_free(save_name, MYF(0));
+
+  pthread_mutex_unlock(&LOCK_log);
+
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Write a command to traditional general log file
+
+  SYNOPSIS
+    write()
+    event_time        command start timestamp
+    user_host         the pointer to the string with user@host info
+    user_host_len     length of the user_host string. this is computed once
+                      and passed to all general log event handlers
+    thread_id         Id of the thread, issued a query
+    command_type      the type of the command being logged
+    command_type_len  the length of the string above
+    sql_text          the very text of the query being executed
+    sql_text_len      the length of sql_text string
+
+  DESCRIPTION
+    Log given command to to normal (not rotable) log file
+
+  RETURN
+    FASE - OK
+    TRUE - error occured
+*/
+
+bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
+                            uint user_host_len, int thread_id,
+                            const char *command_type, uint command_type_len,
+                            const char *sql_text, uint sql_text_len)
+{
+  char buff[32];
+  uint length= 0;
+  char time_buff[MAX_TIME_SIZE];
+  struct tm start;
+  uint time_buff_len= 0;
+
+  /* Test if someone closed between the is_open test and lock */
+  if (is_open())
+  {
+    /* Note that my_b_write() assumes it knows the length for this */
+    if (event_time != last_time)
+    {
+      last_time= event_time;
+
+      localtime_r(&event_time, &start);
+
+      time_buff_len= my_snprintf(time_buff, MAX_TIME_SIZE,
+                                 "%02d%02d%02d %2d:%02d:%02d",
+                                 start.tm_year % 100, start.tm_mon + 1,
+                                 start.tm_mday, start.tm_hour,
+                                 start.tm_min, start.tm_sec);
+
+      if (my_b_write(&log_file, (byte*) &time_buff, time_buff_len))
+        goto err;
+    }
+    else
+      if (my_b_write(&log_file, (byte*) "\t\t" ,2) < 0)
+        goto err;
+
+    /* command_type, thread_id */
+    length= my_snprintf(buff, 32, "%5ld ", thread_id);
+
+    if (my_b_write(&log_file, (byte*) buff, length))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) command_type, command_type_len))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) "\t", 1))
+      goto err;
+
+    /* sql_text */
+    if (my_b_write(&log_file, (byte*) sql_text, sql_text_len))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) "\n", 1) ||
+        flush_io_cache(&log_file))
+      goto err;
+  }
+
+  return FALSE;
+err:
+
+  if (!write_error)
+  {
+    write_error= 1;
+    sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+  }
+  return TRUE;
+}
+
+/*
+  Log a query to the traditional slow log file
+
+  SYNOPSIS
+    write()
+    thd               THD of the query
+    current_time      current timestamp
+    query_start_arg   command start timestamp
+    user_host         the pointer to the string with user@host info
+    user_host_len     length of the user_host string. this is computed once
+                      and passed to all general log event handlers
+    query_time        Amount of time the query took to execute (in seconds)
+    lock_time         Amount of time the query was locked (in seconds)
+    is_command        The flag, which determines, whether the sql_text is a
+                      query or an administrator command.
+    sql_text          the very text of the query or administrator command
+                      processed
+    sql_text_len      the length of sql_text string
+
+  DESCRIPTION
+    Log a query to the slow log file.
+
+  RETURN
+    FALSE - OK
+    TRUE - error occured
+*/
+
+bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
+                            time_t query_start_arg, const char *user_host,
+                            uint user_host_len, longlong query_time,
+                            longlong lock_time, bool is_command,
+                            const char *sql_text, uint sql_text_len)
+{
+  bool error= 0;
+  DBUG_ENTER("MYSQL_QUERY_LOG::write");
+
+  if (!is_open())
+    DBUG_RETURN(0);
+
+  if (is_open())
+  {                                     // Safety agains reopen
+    int tmp_errno= 0;
+    char buff[80], *end;
+    uint buff_len;
+    end= buff;
+
+    if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
+    {
+      Security_context *sctx= thd->security_ctx;
+      if (current_time != last_time)
+      {
+        last_time= current_time;
+        struct tm start;
+        localtime_r(&current_time, &start);
+
+        buff_len= my_snprintf(buff, sizeof buff,
+                              "# Time: %02d%02d%02d %2d:%02d:%02d\n",
+                              start.tm_year % 100, start.tm_mon + 1,
+                              start.tm_mday, start.tm_hour,
+                              start.tm_min, start.tm_sec);
+
+        /* Note that my_b_write() assumes it knows the length for this */
+        if (my_b_write(&log_file, (byte*) buff, buff_len))
+          tmp_errno= errno;
+      }
+      if (my_b_printf(&log_file, "# User@Host: ",
+                      sizeof("# User@Host: ") - 1) != sizeof("# User@Host: ") - 1)
+        tmp_errno= errno;
+      if (my_b_printf(&log_file, user_host, user_host_len) != user_host_len)
+        tmp_errno= errno;
+      if (my_b_write(&log_file, (byte*) "\n", 1))
+        tmp_errno= errno;
+    }
+    /* For slow query log */
+    if (my_b_printf(&log_file,
+                    "# Query_time: %lu  Lock_time: %lu"
+                    " Rows_sent: %lu  Rows_examined: %lu\n",
+                    (ulong) query_time, (ulong) lock_time,
+                    (ulong) thd->sent_row_count,
+                    (ulong) thd->examined_row_count) == (uint) -1)
+      tmp_errno= errno;
+    if (thd->db && strcmp(thd->db, db))
+    {                                   // Database changed
+      if (my_b_printf(&log_file, "use %s;\n", thd->db) == (uint) -1)
+        tmp_errno= errno;
+      strmov(db, thd->db);
+    }
+    if (thd->last_insert_id_used)
+    {
+      end= strmov(end, ",last_insert_id=");
+      end= longlong10_to_str((longlong) thd->current_insert_id, end, -10);
+    }
+    // Save value if we do an insert.
+    if (thd->insert_id_used)
+    {
+      if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
+      {
+        end= strmov(end, ",insert_id=");
+        end= longlong10_to_str((longlong) thd->last_insert_id, end, -10);
+      }
+    }
+
+    /*
+      This info used to show up randomly, depending on whether the query
+      checked the query start time or not. now we always write current
+      timestamp to the slow log
+    */
+    end= strmov(end, ",timestamp=");
+    end= int10_to_str((long) current_time, end, 10);
+
+    if (end != buff)
+    {
+      *end++= ';';
+      *end= '\n';
+      if (my_b_write(&log_file, (byte*) "SET ", 4) ||
+          my_b_write(&log_file, (byte*) buff + 1, (uint) (end - buff)))
+        tmp_errno= errno;
+    }
+    if (is_command)
+    {
+      end= strxmov(buff, "# administrator command: ", NullS);
+      buff_len= (ulong) (end - buff);
+      my_b_write(&log_file, (byte*) buff, buff_len);
+    }
+    if (my_b_write(&log_file, (byte*) sql_text, sql_text_len) ||
+        my_b_write(&log_file, (byte*) ";\n", 2) ||
+        flush_io_cache(&log_file))
+      tmp_errno= errno;
+    if (tmp_errno)
+    {
+      error= 1;
+      if (!write_error)
+      {
+        write_error= 1;
+        sql_print_error(ER(ER_ERROR_ON_WRITE), name, error);
+      }
+    }
+  }
+  DBUG_RETURN(error);
+}

 const char *MYSQL_LOG::generate_name(const char *log_name,
-                                     const char *suffix, bool strip_ext,
-                                     char *buff)
+                                      const char *suffix, bool strip_ext,
+                                      char *buff)
 {
   if (!log_name || !log_name[0])
   {
@@ -1456,23 +1857,79 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
       TODO: The following should be using fn_format();  We just need to
       first change fn_format() to cut the file name if it's too long.
     */
-    strmake(buff, glob_hostname, FN_REFLEN - 5);
-    strmov(fn_ext(buff), suffix);
+    strmake(buff, glob_hostname, FN_REFLEN - 5);
+    strmov(fn_ext(buff), suffix);
     return (const char *)buff;
   }
   // get rid of extension if the log is binary to avoid problems
   if (strip_ext)
   {
-    char *p = fn_ext(log_name);
-    uint length = (uint) (p - log_name);
-    strmake(buff, log_name, min(length, FN_REFLEN));
+    char *p= fn_ext(log_name);
+    uint length= (uint) (p - log_name);
+    strmake(buff, log_name, min(length, FN_REFLEN));
     return (const char*)buff;
   }
   return log_name;
 }

-bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
+MYSQL_BIN_LOG::MYSQL_BIN_LOG()
+  :bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
+   need_start_event(TRUE), m_table_map_version(0),
+   description_event_for_exec(0), description_event_for_queue(0)
+{
+  /*
+    We don't want to initialize locks here as such initialization depends on
+    safe_mutex (when using safe_mutex) which depends on MY_INIT(), which is
+    called only in main(). Doing initialization here would make it happen
+    before main().
+  */
+  index_file_name[0] = 0;
+  bzero((char*) &index_file, sizeof(index_file));
+}
+
+/* this is called only once */
+
+void MYSQL_BIN_LOG::cleanup()
+{
+  DBUG_ENTER("cleanup");
+  if (inited)
+  {
+    inited= 0;
+    close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
+    delete description_event_for_queue;
+    delete description_event_for_exec;
+    (void) pthread_mutex_destroy(&LOCK_log);
+    (void) pthread_mutex_destroy(&LOCK_index);
+    (void) pthread_cond_destroy(&update_cond);
+  }
+  DBUG_VOID_RETURN;
+}
+
+/* Init binlog-specific vars */
+void MYSQL_BIN_LOG::init(bool no_auto_events_arg, ulong max_size_arg)
+{
+  DBUG_ENTER("MYSQL_BIN_LOG::init");
+  no_auto_events= no_auto_events_arg;
+  max_size= max_size_arg;
+  DBUG_PRINT("info",("max_size: %lu", max_size));
+  DBUG_VOID_RETURN;
+}
+
+void MYSQL_BIN_LOG::init_pthread_objects()
+{
+  DBUG_ASSERT(inited == 0);
+  inited= 1;
+  (void) pthread_mutex_init(&LOCK_log, MY_MUTEX_INIT_SLOW);
+  (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
+  (void) pthread_cond_init(&update_cond, 0);
+}
+
+bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg,
                                     const char *log_name)
 {
   File index_file_nr= -1;

@@ -1509,10 +1966,10 @@ bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
 /*
-  Open a (new) log file.
+  Open a (new) binlog file.

   DESCRIPTION
-  - If binary logs, also open the index file and register the new
-    file name in it
+  - Open the log file and the index file. Register the new
+    file name in it
   - When calling this when the file is in use, you must have a locks
     on LOCK_log and LOCK_index.
@@ -1522,94 +1979,32 @@ bool MYSQL_LOG::open_index_file(const char *index_file_name_arg,
     1   error
 */

-bool MYSQL_LOG::open(const char *log_name,
-                     enum_log_type log_type_arg,
-                     const char *new_name,
-                     enum cache_type io_cache_type_arg,
-                     bool no_auto_events_arg,
-                     ulong max_size_arg,
-                     bool null_created_arg)
+bool MYSQL_BIN_LOG::open(const char *log_name,
+                         enum_log_type log_type_arg,
+                         const char *new_name,
+                         enum cache_type io_cache_type_arg,
+                         bool no_auto_events_arg,
+                         ulong max_size_arg,
+                         bool null_created_arg)
 {
-  char buff[FN_REFLEN];
-  File file= -1;
-  int open_flags = O_CREAT | O_BINARY;
-  DBUG_ENTER("MYSQL_LOG::open");
+  DBUG_ENTER("MYSQL_BIN_LOG::open");
   DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg));

-  last_time=query_start=0;
-  write_error=0;
-
-  init(log_type_arg,io_cache_type_arg,no_auto_events_arg,max_size_arg);
-
-  if (!(name=my_strdup(log_name,MYF(MY_WME))))
-  {
-    name= (char *)log_name; // for the error message
-    goto err;
-  }
-  if (new_name)
-    strmov(log_file_name,new_name);
-  else if (generate_new_name(log_file_name, name))
-    goto err;
-
-  if (io_cache_type == SEQ_READ_APPEND)
-    open_flags |= O_RDWR | O_APPEND;
-  else
-    open_flags |= O_WRONLY | (log_type == LOG_BIN ? 0 : O_APPEND);
-
-  db[0]=0;
-  open_count++;
-  if ((file=my_open(log_file_name,open_flags,
-                    MYF(MY_WME | ME_WAITTANG))) < 0 ||
-      init_io_cache(&log_file, file, IO_SIZE, io_cache_type,
-                    my_tell(file,MYF(MY_WME)), 0,
-                    MYF(MY_WME | MY_NABP |
-                        ((log_type == LOG_BIN) ? MY_WAIT_IF_FULL : 0))))
-    goto err;
-
-  switch (log_type) {
-  case LOG_NORMAL:
-  {
-    char *end;
-    int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. "
-#ifdef EMBEDDED_LIBRARY
-                        "embedded library\n", my_progname, server_version
-#elif __NT__
-                        "started with:\nTCP Port: %d, Named Pipe: %s\n",
-                        my_progname, server_version, mysqld_port,
-                        mysqld_unix_port
-#else
-                        "started with:\nTcp port: %d Unix socket: %s\n",
-                        my_progname, server_version, mysqld_port,
-                        mysqld_unix_port
-#endif
-                       );
-    end=strnmov(buff + len, "Time Id Command Argument\n",
-                sizeof(buff) - len);
-    if (my_b_write(&log_file, (byte*) buff,(uint) (end-buff)) ||
-        flush_io_cache(&log_file))
-      goto err;
-    break;
-  }
-  case LOG_NEW:
-  {
-    uint len;
-    time_t skr=time(NULL);
-    struct tm tm_tmp;
-    localtime_r(&skr,&tm_tmp);
-    len= my_snprintf(buff,sizeof(buff),
-                     "# %s, Version: %s at %02d%02d%02d %2d:%02d:%02d\n",
-                     my_progname, server_version,
-                     tm_tmp.tm_year % 100, tm_tmp.tm_mon+1, tm_tmp.tm_mday,
-                     tm_tmp.tm_hour, tm_tmp.tm_min, tm_tmp.tm_sec);
-    if (my_b_write(&log_file, (byte*) buff, len) ||
-        flush_io_cache(&log_file))
-      goto err;
-    break;
-  }
-  case LOG_BIN:
+  /* open the main log file */
+  if (MYSQL_LOG::open(log_name, log_type_arg, new_name, io_cache_type_arg))
+    DBUG_RETURN(1);                            /* all warnings issued */
+
+  init(no_auto_events_arg, max_size_arg);
+
+  open_count++;
+
+  DBUG_ASSERT(log_type == LOG_BIN);
   {
     bool write_file_name_to_index_file= 0;

@@ -1700,13 +2095,9 @@ bool MYSQL_LOG::open(const char *log_name,
         my_sync(index_file.file, MYF(MY_WME)))
       goto err;
   }
-    break;
-  }
-  case LOG_CLOSED:                              // Impossible
-  case LOG_TO_BE_OPENED:
-    DBUG_ASSERT(1);
-    break;
-  }
+  log_state= LOG_OPENED;

   DBUG_RETURN(0);

 err:

@@ -1719,12 +2110,12 @@ shutdown the MySQL server and restart it.", name, errno);
   end_io_cache(&log_file);
   end_io_cache(&index_file);
   safeFree(name);
-  log_type= LOG_CLOSED;
+  log_state= LOG_CLOSED;
   DBUG_RETURN(1);
 }

-int MYSQL_LOG::get_current_log(LOG_INFO* linfo)
+int MYSQL_BIN_LOG::get_current_log(LOG_INFO* linfo)
 {
   pthread_mutex_lock(&LOCK_log);
   strmake(linfo->log_file_name, log_file_name, sizeof(linfo->log_file_name)-1);
@@ -1811,7 +2202,7 @@ err:
     LOG_INFO_IO      Got IO error while reading file
 */

-int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
+int MYSQL_BIN_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
                             bool need_lock)
 {
   int error= 0;

@@ -1885,7 +2276,7 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
     LOG_INFO_IO      Got IO error while reading file
 */

-int MYSQL_LOG::find_next_log(LOG_INFO* linfo, bool need_lock)
+int MYSQL_BIN_LOG::find_next_log(LOG_INFO* linfo, bool need_lock)
 {
   int error= 0;
   uint length;

@@ -1933,12 +2324,11 @@ err:
     1   error
 */

-bool MYSQL_LOG::reset_logs(THD* thd)
+bool MYSQL_BIN_LOG::reset_logs(THD* thd)
 {
   LOG_INFO linfo;
   bool error=0;
   const char* save_name;
-  enum_log_type save_log_type;
   DBUG_ENTER("reset_logs");

   ha_reset_logs(thd);

@@ -1960,7 +2350,6 @@ bool MYSQL_LOG::reset_logs(THD* thd)
   /* Save variables so that we can reopen the log */
   save_name=name;
   name=0;                                       // Protect against free
-  save_log_type=log_type;
   close(LOG_CLOSE_TO_BE_OPENED);

   /* First delete all old log files */

@@ -1984,8 +2373,7 @@ bool MYSQL_LOG::reset_logs(THD* thd)
   if (!thd->slave_thread)
     need_start_event=1;
   if (!open_index_file(index_file_name, 0))
-    open(save_name, save_log_type, 0,
-         io_cache_type, no_auto_events, max_size, 0);
+    open(save_name, log_type, 0, io_cache_type, no_auto_events, max_size, 0);
   my_free((gptr) save_name, MYF(0));

 err:

@@ -2033,7 +2421,7 @@ err:

 #ifdef HAVE_REPLICATION

-int MYSQL_LOG::purge_first_log(struct st_relay_log_info* rli, bool included)
+int MYSQL_BIN_LOG::purge_first_log(struct st_relay_log_info* rli, bool included)
 {
   int error;
   DBUG_ENTER("purge_first_log");

@@ -2109,7 +2497,7 @@ err:
   Update log index_file
 */

-int MYSQL_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
+int MYSQL_BIN_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
 {
   if (copy_up_file_and_fill(&index_file, log_info->index_file_start_offset))
     return LOG_INFO_IO;

@@ -2142,7 +2530,7 @@ int MYSQL_LOG::update_log_index(LOG_INFO* log_info, bool need_update_threads)
     LOG_INFO_EOF             to_log not found
 */

-int MYSQL_LOG::purge_logs(const char *to_log,
+int MYSQL_BIN_LOG::purge_logs(const char *to_log,
                           bool included,
                           bool need_mutex,
                           bool need_update_threads,

@@ -2228,7 +2616,7 @@ err:
     LOG_INFO_PURGE_NO_ROTATE Binary file that can't be rotated
 */

-int MYSQL_LOG::purge_logs_before_date(time_t purge_time)
+int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
 {
   int error;
   LOG_INFO log_info;

@@ -2287,7 +2675,7 @@ err:
     If file name will be longer then FN_REFLEN it will be truncated
 */

-void MYSQL_LOG::make_log_name(char* buf, const char* log_ident)
+void MYSQL_BIN_LOG::make_log_name(char* buf, const char* log_ident)
 {
   uint dir_len = dirname_length(log_file_name);
   if (dir_len > FN_REFLEN)

@@ -2301,29 +2689,48 @@ void MYSQL_LOG::make_log_name(char* buf, const char* log_ident)
   Check if we are writing/reading to the given log file
 */

-bool MYSQL_LOG::is_active(const char *log_file_name_arg)
+bool MYSQL_BIN_LOG::is_active(const char *log_file_name_arg)
 {
   return !strcmp(log_file_name, log_file_name_arg);
 }

+/*
+  Wrappers around new_file_impl to avoid using argument
+  to control locking. The argument 1) less readable 2) breaks
+  incapsulation 3) allows external access to the class without
+  a lock (which is not possible with private new_file_without_locking
+  method).
+*/
+
+void MYSQL_BIN_LOG::new_file()
+{
+  new_file_impl(1);
+}
+
+void MYSQL_BIN_LOG::new_file_without_locking()
+{
+  new_file_impl(0);
+}
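The comment introduced above motivates replacing a bool "need_lock" argument in the public interface with two thin wrappers over one private implementation, so that only code inside the class can skip locking. A minimal stand-alone sketch of that pattern follows; it is an illustration under assumed, hypothetical names (Log, rotate, rotate_without_locking, rotate_impl, a plain std::mutex instead of the server's LOCK_log), not the server's actual code:

    #include <mutex>

    class Log
    {
    public:
      // Public entry point: always locks.
      void rotate()                 { rotate_impl(/* need_lock= */ true); }

    private:
      // Only code inside the class, already holding lock_, may skip locking;
      // keeping this member private is the encapsulation the comment refers to.
      void rotate_without_locking() { rotate_impl(/* need_lock= */ false); }

      // Shared implementation; the locking decision never leaks to callers.
      void rotate_impl(bool need_lock)
      {
        if (need_lock)
          lock_.lock();
        // ... switch to a new log file ...
        if (need_lock)
          lock_.unlock();
      }

      std::mutex lock_;
    };

External callers can only ever reach the locked path, while internal callers that already hold the lock use the private wrapper, which is the design choice the diff applies to new_file()/new_file_without_locking().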
 /*
   Start writing to a new log file or reopen the old file

   SYNOPSIS
-    new_file()
+    new_file_impl()
     need_lock           Set to 1 if caller has not locked LOCK_log

   NOTE
     The new file name is stored last in the index file
 */

-void MYSQL_LOG::new_file(bool need_lock)
+void MYSQL_BIN_LOG::new_file_impl(bool need_lock)
 {
   char new_name[FN_REFLEN], *new_name_ptr, *old_name;
-  enum_log_type save_log_type;
-  DBUG_ENTER("MYSQL_LOG::new_file");
+  DBUG_ENTER("MYSQL_BIN_LOG::new_file_impl");
   if (!is_open())
   {
     DBUG_PRINT("info",("log is closed"));

@@ -2389,12 +2796,11 @@ void MYSQL_LOG::new_file(bool need_lock)
     signal_update();
   }
   old_name=name;
-  save_log_type=log_type;
   name=0;                               // Don't free name
   close(LOG_CLOSE_TO_BE_OPENED);

   /*
-     Note that at this point, log_type != LOG_CLOSED (important for is_open()).
+     Note that at this point, log_state != LOG_CLOSED (important for is_open()).
   */

   /*

@@ -2406,7 +2812,7 @@ void MYSQL_LOG::new_file(bool need_lock)
     trigger temp tables deletion on slaves.
   */

-  open(old_name, save_log_type, new_name_ptr,
+  open(old_name, log_type, new_name_ptr,
        io_cache_type, no_auto_events, max_size, 1);
   my_free(old_name,MYF(0));

@@ -2419,11 +2825,11 @@ end:
 }

-bool MYSQL_LOG::append(Log_event* ev)
+bool MYSQL_BIN_LOG::append(Log_event* ev)
 {
   bool error = 0;
   pthread_mutex_lock(&LOCK_log);
-  DBUG_ENTER("MYSQL_LOG::append");
+  DBUG_ENTER("MYSQL_BIN_LOG::append");

   DBUG_ASSERT(log_file.type == SEQ_READ_APPEND);
   /*

@@ -2438,7 +2844,7 @@ bool MYSQL_LOG::append(Log_event* ev)
   bytes_written += ev->data_written;
   DBUG_PRINT("info",("max_size: %lu",max_size));
   if ((uint) my_b_append_tell(&log_file) > max_size)
-    new_file(0);
+    new_file_without_locking();

 err:
   pthread_mutex_unlock(&LOCK_log);

@@ -2447,10 +2853,10 @@ err:
 }

-bool MYSQL_LOG::appendv(const char* buf, uint len,...)
+bool MYSQL_BIN_LOG::appendv(const char* buf, uint len,...)
 {
   bool error= 0;
-  DBUG_ENTER("MYSQL_LOG::appendv");
+  DBUG_ENTER("MYSQL_BIN_LOG::appendv");
   va_list(args);
   va_start(args,len);

@@ -2468,7 +2874,7 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...)
   } while ((buf=va_arg(args,const char*)) && (len=va_arg(args,uint)));
   DBUG_PRINT("info",("max_size: %lu",max_size));
   if ((uint) my_b_append_tell(&log_file) > max_size)
-    new_file(0);
+    new_file_without_locking();

 err:
   if (!error)
@@ -2477,99 +2883,7 @@ err:
 }

-/*
-  Write a command to traditional general log file
-
-  SYNOPSIS
-    write()
-    event_time        command start timestamp
-    user_host         the pointer to the string with user@host info
-    user_host_len     length of the user_host string. this is computed once
-                      and passed to all general log event handlers
-    thread_id         Id of the thread, issued a query
-    command_type      the type of the command being logged
-    command_type_len  the length of the string above
-    sql_text          the very text of the query being executed
-    sql_text_len      the length of sql_text string
-
-  DESCRIPTION
-    Log given command to to normal (not rotable) log file
-
-  RETURN
-    FASE - OK
-    TRUE - error occured
-*/
-
-bool MYSQL_LOG::write(time_t event_time, const char *user_host,
-                      uint user_host_len, int thread_id,
-                      const char *command_type, uint command_type_len,
-                      const char *sql_text, uint sql_text_len)
-{
-  ...
-}
-
-bool MYSQL_LOG::flush_and_sync()
+bool MYSQL_BIN_LOG::flush_and_sync()
 {
   int err=0, fd=log_file.file;
   safe_mutex_assert_owner(&LOCK_log);

@@ -2583,7 +2897,7 @@ bool MYSQL_LOG::flush_and_sync()
   return err;
 }

-void MYSQL_LOG::start_union_events(THD *thd)
+void MYSQL_BIN_LOG::start_union_events(THD *thd)
 {
   DBUG_ASSERT(!thd->binlog_evt_union.do_union);
   thd->binlog_evt_union.do_union= TRUE;

@@ -2592,13 +2906,13 @@ void MYSQL_LOG::start_union_events(THD *thd)
   thd->binlog_evt_union.first_query_id= thd->query_id;
 }

-void MYSQL_LOG::stop_union_events(THD *thd)
+void MYSQL_BIN_LOG::stop_union_events(THD *thd)
 {
   DBUG_ASSERT(thd->binlog_evt_union.do_union);
   thd->binlog_evt_union.do_union= FALSE;
 }

-bool MYSQL_LOG::is_query_in_union(THD *thd, query_id_t query_id_param)
+bool MYSQL_BIN_LOG::is_query_in_union(THD *thd, query_id_t query_id_param)
 {
   return (thd->binlog_evt_union.do_union &&
           query_id_param >= thd->binlog_evt_union.first_query_id);

@@ -2705,9 +3019,10 @@ THD::binlog_set_pending_rows_event(Rows_log_event* ev)
   (either cached binlog if transaction, or disk binlog). Sets a new pending
   event.
 */
-int MYSQL_LOG::flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event)
+int MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
+                                                    Rows_log_event* event)
 {
-  DBUG_ENTER("MYSQL_LOG::flush_and_set_pending_rows_event(event)");
+  DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
   DBUG_ASSERT(thd->current_stmt_binlog_row_based && mysql_bin_log.is_open());
   DBUG_PRINT("enter", ("event=%p", event));

@@ -2791,11 +3106,11 @@ int MYSQL_LOG::flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event)
   Write an event to the binary log
 */

-bool MYSQL_LOG::write(Log_event *event_info)
+bool MYSQL_BIN_LOG::write(Log_event *event_info)
 {
   THD *thd= event_info->thd;
   bool error= 1;
-  DBUG_ENTER("MYSQL_LOG::write(Log_event *)");
+  DBUG_ENTER("MYSQL_BIN_LOG::write(Log_event *)");

   if (thd->binlog_evt_union.do_union)
   {

@@ -3015,14 +3330,14 @@ bool general_log_print(THD *thd, enum enum_server_command command,
   return error;
 }

-void MYSQL_LOG::rotate_and_purge(uint flags)
+void MYSQL_BIN_LOG::rotate_and_purge(uint flags)
 {
   if (!(flags & RP_LOCK_LOG_IS_ALREADY_LOCKED))
     pthread_mutex_lock(&LOCK_log);
   if ((flags & RP_FORCE_ROTATE) ||
       (my_b_tell(&log_file) >= (my_off_t) max_size))
   {
-    new_file(0);
+    new_file_without_locking();
 #ifdef HAVE_REPLICATION
     if (expire_logs_days)
     {

@@ -3036,7 +3351,7 @@ void MYSQL_LOG::rotate_and_purge(uint flags)
     pthread_mutex_unlock(&LOCK_log);
 }

-uint MYSQL_LOG::next_file_id()
+uint MYSQL_BIN_LOG::next_file_id()
 {
   uint res;
   pthread_mutex_lock(&LOCK_log);

@@ -3067,9 +3382,9 @@ uint MYSQL_LOG::next_file_id()
     that the same updates are run on the slave.
 */

-bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
+bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
 {
-  DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)");
+  DBUG_ENTER("MYSQL_BIN_LOG::write(THD *, IO_CACHE *, Log_event *)");
   VOID(pthread_mutex_lock(&LOCK_log));

   if (likely(is_open()))                       // Should always be true

@@ -3164,148 +3479,6 @@ err:
 }

-/*
-  Log a query to the traditional slow log file
-
-  SYNOPSIS
-    write()
-    thd               THD of the query
-    current_time      current timestamp
-    query_start_arg   command start timestamp
-    user_host         the pointer to the string with user@host info
-    user_host_len     length of the user_host string. this is computed once
-                      and passed to all general log event handlers
-    query_time        Amount of time the query took to execute (in seconds)
-    lock_time         Amount of time the query was locked (in seconds)
-    is_command        The flag, which determines, whether the sql_text is a
-                      query or an administrator command.
-    sql_text          the very text of the query or administrator command
-                      processed
-    sql_text_len      the length of sql_text string
-
-  DESCRIPTION
-    Log a query to the slow log file.
-
-  RETURN
-    FALSE - OK
-    TRUE - error occured
-*/
-
-bool MYSQL_LOG::write(THD *thd, time_t current_time, time_t query_start_arg,
-                      const char *user_host, uint user_host_len,
-                      longlong query_time, longlong lock_time, bool is_command,
-                      const char *sql_text, uint sql_text_len)
-{
-  bool error= 0;
-  DBUG_ENTER("MYSQL_LOG::write");
-  ...
-  DBUG_RETURN(error);
-}

 /*
   Wait until we get a signal that the binary log has been updated

@@ -3322,7 +3495,7 @@ bool MYSQL_LOG::write(THD *thd, time_t current_time, time_t query_start_arg,
     THD::enter_cond() (see NOTES in sql_class.h).
 */

-void MYSQL_LOG::wait_for_update(THD* thd, bool is_slave)
+void MYSQL_BIN_LOG::wait_for_update(THD* thd, bool is_slave)
 {
   const char *old_msg;
   DBUG_ENTER("wait_for_update");

@@ -3355,11 +3528,11 @@ void MYSQL_LOG::wait_for_update(THD* thd, bool is_slave)
     The internal structures are not freed until cleanup() is called
 */

-void MYSQL_LOG::close(uint exiting)
+void MYSQL_BIN_LOG::close(uint exiting)
 {                                       // One can't set log_type here!
-  DBUG_ENTER("MYSQL_LOG::close");
+  DBUG_ENTER("MYSQL_BIN_LOG::close");
   DBUG_PRINT("enter",("exiting: %d", (int) exiting));
-  if (log_type != LOG_CLOSED && log_type != LOG_TO_BE_OPENED)
+  if (log_state == LOG_OPENED)
   {
 #ifdef HAVE_REPLICATION
     if (log_type == LOG_BIN && !no_auto_events &&

@@ -3371,7 +3544,6 @@ void MYSQL_LOG::close(uint exiting)
       signal_update();
     }
 #endif /* HAVE_REPLICATION */
-    end_io_cache(&log_file);

     /* don't pwrite in a file opened with O_APPEND - it doesn't work */
     if (log_file.type == WRITE_CACHE && log_type == LOG_BIN)

@@ -3381,16 +3553,8 @@ void MYSQL_LOG::close(uint exiting)
       my_pwrite(log_file.file, &flags, 1, offset, MYF(0));
     }

-    if (my_sync(log_file.file, MYF(MY_WME)) && ! write_error)
-    {
-      write_error= 1;
-      sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
-    }
-    if (my_close(log_file.file, MYF(MY_WME)) && ! write_error)
-    {
-      write_error= 1;
-      sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
-    }
+    /* this will cleanup IO_CACHE, sync and close the file */
+    MYSQL_LOG::close(exiting);
   }

   /*

@@ -3407,13 +3571,13 @@ void MYSQL_LOG::close(uint exiting)
       sql_print_error(ER(ER_ERROR_ON_WRITE), index_file_name, errno);
     }
   }
-  log_type= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
+  log_state= (exiting & LOG_CLOSE_TO_BE_OPENED) ? LOG_TO_BE_OPENED : LOG_CLOSED;
   safeFree(name);
   DBUG_VOID_RETURN;
 }

-void MYSQL_LOG::set_max_size(ulong max_size_arg)
+void MYSQL_BIN_LOG::set_max_size(ulong max_size_arg)
 {
   /*
     We need to take locks, otherwise this may happen:

@@ -3422,7 +3586,7 @@ void MYSQL_LOG::set_max_size(ulong max_size_arg)
     uses the old_max_size argument, so max_size_arg has been overwritten and
     it's like if the SET command was never run.
   */
-  DBUG_ENTER("MYSQL_LOG::set_max_size");
+  DBUG_ENTER("MYSQL_BIN_LOG::set_max_size");
   pthread_mutex_lock(&LOCK_log);
   if (is_open())
     max_size= max_size_arg;

@@ -3571,9 +3735,9 @@ bool flush_error_log()
   return result;
 }

-void MYSQL_LOG::signal_update()
+void MYSQL_BIN_LOG::signal_update()
 {
-  DBUG_ENTER("MYSQL_LOG::signal_update");
+  DBUG_ENTER("MYSQL_BIN_LOG::signal_update");
   pthread_cond_broadcast(&update_cond);
   DBUG_VOID_RETURN;
 }

@@ -4176,7 +4340,7 @@ int TC_LOG::using_heuristic_recover()
 }

 /****** transaction coordinator log for 2pc - binlog() based solution ******/
-#define TC_LOG_BINLOG MYSQL_LOG
+#define TC_LOG_BINLOG MYSQL_BIN_LOG

 /*
   TODO keep in-memory list of prepared transactions
sql/log.h

@@ -147,33 +147,85 @@ typedef struct st_log_info
 class Log_event;
 class Rows_log_event;

-enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN };
+enum enum_log_type { LOG_UNKNOWN, LOG_NORMAL, LOG_BIN };
+enum enum_log_state { LOG_OPENED, LOG_CLOSED, LOG_TO_BE_OPENED };

+/*
+  TODO split MYSQL_LOG into base MYSQL_LOG and
+  MYSQL_QUERY_LOG, MYSQL_SLOW_LOG, MYSQL_BIN_LOG
+  most of the code from MYSQL_LOG should be in the MYSQL_BIN_LOG
+  only (TC_LOG included)
+
+  TODO use mmap instead of IO_CACHE for binlog
+  (mmap+fsync is two times faster than write+fsync)
+*/
+
-class MYSQL_LOG: public TC_LOG
+class MYSQL_LOG
 {
+ public:
+  MYSQL_LOG();
+  void init_pthread_objects();
+  void cleanup();
+  bool open(const char *log_name,
+            enum_log_type log_type,
+            const char *new_name,
+            enum cache_type io_cache_type_arg);
+  void init(enum_log_type log_type_arg,
+            enum cache_type io_cache_type_arg);
+  void close(uint exiting);
+  inline bool is_open() { return log_state != LOG_CLOSED; }
+  const char *generate_name(const char *log_name, const char *suffix,
+                            bool strip_ext, char *buff);
+  int generate_new_name(char *new_name, const char *log_name);
+ protected:
+  /* LOCK_log is inited by init_pthread_objects() */
+  pthread_mutex_t LOCK_log;
+  char *name;
+  char log_file_name[FN_REFLEN];
+  char time_buff[20], db[NAME_LEN + 1];
+  bool write_error, inited;
+  IO_CACHE log_file;
+  enum_log_type log_type;
+  volatile enum_log_state log_state;
+  enum cache_type io_cache_type;
+  friend class Log_event;
+};
+
+class MYSQL_QUERY_LOG: public MYSQL_LOG
+{
+public:
+  MYSQL_QUERY_LOG() : last_time(0) {}
+  void reopen_file();
+  bool write(time_t event_time, const char *user_host,
+             uint user_host_len, int thread_id,
+             const char *command_type, uint command_type_len,
+             const char *sql_text, uint sql_text_len);
+  bool write(THD *thd, time_t current_time, time_t query_start_arg,
+             const char *user_host, uint user_host_len,
+             longlong query_time, longlong lock_time, bool is_command,
+             const char *sql_text, uint sql_text_len);
+  bool open_slow_log(const char *log_name)
+  {
+    char buf[FN_REFLEN];
+    return open(generate_name(log_name, "-slow.log", 0, buf),
+                LOG_NORMAL, 0, WRITE_CACHE);
+  }
+  bool open_query_log(const char *log_name)
+  {
+    char buf[FN_REFLEN];
+    return open(generate_name(log_name, ".log", 0, buf),
+                LOG_NORMAL, 0, WRITE_CACHE);
+  }
+
+private:
+  time_t last_time;
+};
+
+class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
+{
  private:
   /* LOCK_log and LOCK_index are inited by init_pthread_objects() */
-  pthread_mutex_t LOCK_log, LOCK_index;
+  pthread_mutex_t LOCK_index;
   pthread_mutex_t LOCK_prep_xids;
   pthread_cond_t  COND_prep_xids;
   pthread_cond_t update_cond;
   ulonglong bytes_written;
-  time_t last_time, query_start;
-  IO_CACHE log_file;
   IO_CACHE index_file;
-  char *name;
-  char time_buff[20], db[NAME_LEN + 1];
-  char log_file_name[FN_REFLEN], index_file_name[FN_REFLEN];
+  char index_file_name[FN_REFLEN];
   /*
     The max size before rotation (usable only if log_type == LOG_BIN: binary
     logs and relay logs).

@@ -186,13 +238,10 @@ class MYSQL_LOG: public TC_LOG
   */
   ulong max_size;
   ulong prepared_xids; /* for tc log - number of xids to remember */
-  volatile enum_log_type log_type;
-  enum cache_type io_cache_type;
   // current file sequence number for load data infile binary logging
   uint file_id;
   uint open_count;                              // For replication
   int readers_count;
-  bool write_error, inited;
   bool need_start_event;
   /*
     no_auto_events means we don't want any of these automatic events :

@@ -202,13 +251,21 @@ class MYSQL_LOG: public TC_LOG
     In 5.0 it's 0 for relay logs too!
   */
   bool no_auto_events;
-  friend class Log_event;

   ulonglong m_table_map_version;

   int write_to_file(IO_CACHE *cache);
+  /*
+    This is used to start writing to a new log file. The difference from
+    new_file() is locking. new_file_without_locking() does not acquire
+    LOCK_log.
+  */
+  void new_file_without_locking();
+  void new_file_impl(bool need_lock);

 public:
+  MYSQL_LOG::generate_name;
+  MYSQL_LOG::is_open;
   /*
     These describe the log's format. This is used only for relay logs.
     _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's

@@ -220,9 +277,9 @@ public:
   Format_description_log_event *description_event_for_exec,
     *description_event_for_queue;

-  MYSQL_LOG();
+  MYSQL_BIN_LOG();
   /*
-    note that there's no destructor ~MYSQL_LOG() !
+    note that there's no destructor ~MYSQL_BIN_LOG() !
     The reason is that we don't want it to be automatically called
     on exit() - but only during the correct shutdown process
   */

@@ -264,9 +321,7 @@ public:
   void signal_update();
   void wait_for_update(THD* thd, bool master_or_slave);
   void set_need_start_event() { need_start_event = 1; }
-  void init(enum_log_type log_type_arg,
-            enum cache_type io_cache_type_arg,
-            bool no_auto_events_arg, ulong max_size);
+  void init(bool no_auto_events_arg, ulong max_size);
   void init_pthread_objects();
   void cleanup();
   bool open(const char *log_name,

@@ -275,35 +330,10 @@ public:
             enum cache_type io_cache_type_arg,
             bool no_auto_events_arg, ulong max_size,
             bool null_created);
-  const char *generate_name(const char *log_name, const char *suffix,
-                            bool strip_ext, char *buff);
-  /* simplified open_xxx wrappers for the gigantic open above */
-  bool open_query_log(const char *log_name)
-  {
-    char buf[FN_REFLEN];
-    return open(generate_name(log_name, ".log", 0, buf),
-                LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
-  }
-  bool open_slow_log(const char *log_name)
-  {
-    char buf[FN_REFLEN];
-    return open(generate_name(log_name, "-slow.log", 0, buf),
-                LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
-  }
   bool open_index_file(const char *index_file_name_arg,
                        const char *log_name);
-  void new_file(bool need_lock);
-  /* log a command to the old-fashioned general log */
-  bool write(time_t event_time, const char *user_host,
-             uint user_host_len, int thread_id,
-             const char *command_type, uint command_type_len,
-             const char *sql_text, uint sql_text_len);
-  /* log a query to the old-fashioned slow query log */
-  bool write(THD *thd, time_t current_time, time_t query_start_arg,
-             const char *user_host, uint user_host_len,
-             longlong query_time, longlong lock_time, bool is_command,
-             const char *sql_text, uint sql_text_len);
+  /* Use this to start writing a new log file */
+  void new_file();
+
   bool write(Log_event* event_info); // binary log write
   bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);

@@ -319,7 +349,6 @@ public:
   bool appendv(const char* buf, uint len, ...);
   bool append(Log_event* ev);

-  int generate_new_name(char *new_name, const char *old_name);
   void make_log_name(char* buf, const char* log_ident);
   bool is_active(const char* log_file_name);
   int update_log_index(LOG_INFO* linfo, bool need_update_threads);

@@ -339,7 +368,6 @@ public:
   int find_next_log(LOG_INFO* linfo, bool need_mutex);
   int get_current_log(LOG_INFO* linfo);
   uint next_file_id();
-  inline bool is_open() { return log_type != LOG_CLOSED; }
   inline char* get_index_fname() { return index_file_name;}
   inline char* get_log_fname() { return log_file_name; }
   inline char* get_name() { return name; }

@@ -416,7 +444,8 @@ public:
 class Log_to_file_event_handler: public Log_event_handler
 {
-  MYSQL_LOG mysql_log, mysql_slow_log;
+  MYSQL_QUERY_LOG mysql_log;
+  MYSQL_QUERY_LOG mysql_slow_log;
   bool is_initialized;
 public:
   Log_to_file_event_handler()
     : is_initialized(FALSE)
sql/log_event.h

@@ -470,7 +470,7 @@ enum Int_event_type
 #ifndef MYSQL_CLIENT
 class String;
-class MYSQL_LOG;
+class MYSQL_BIN_LOG;
 class THD;
 #endif
sql/mysql_priv.h

@@ -1537,7 +1537,7 @@ extern char *default_tz_name;
 extern my_bool opt_large_pages;
 extern uint opt_large_page_size;
-extern MYSQL_LOG mysql_bin_log;
+extern MYSQL_BIN_LOG mysql_bin_log;
 extern LOGGER logger;
 extern TABLE_LIST general_log, slow_log;
 extern FILE *bootstrap_file;
sql/mysqld.cc

@@ -2635,10 +2635,10 @@ static int init_common_variables(const char *conf_file_name, int argc,
   global_system_variables.time_zone= my_tz_SYSTEM;

   /*
-    Init mutexes for the global MYSQL_LOG objects.
+    Init mutexes for the global MYSQL_BIN_LOG objects.
     As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of
-    global MYSQL_LOGs in their constructors, because then they would be inited
-    before MY_INIT(). So we do it here.
+    global MYSQL_BIN_LOGs in their constructors, because then they would be
+    inited before MY_INIT(). So we do it here.
   */
   mysql_bin_log.init_pthread_objects();
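The comment changed above describes a common C++ pattern: a global object's constructor runs before main(), so anything that depends on runtime setup (here the safe_mutex machinery prepared by MY_INIT()) must be deferred to an explicit init call made from main(). A minimal stand-alone illustration of that pattern, using hypothetical names (GlobalLog, init_pthread_objects) rather than the server's real types, could look like:

    #include <pthread.h>

    struct GlobalLog
    {
      pthread_mutex_t lock;
      bool inited;

      // Runs at static-initialization time, before main():
      // keep it trivial and defer anything that needs runtime setup.
      GlobalLog() : inited(false) {}

      // Called explicitly from main(), once the runtime is ready.
      void init_pthread_objects()
      {
        pthread_mutex_init(&lock, NULL);
        inited= true;
      }
    };

    GlobalLog global_log;               // constructed before main()

    int main()
    {
      // runtime initialization would happen here (MY_INIT() in the server)
      global_log.init_pthread_objects();
      return 0;
    }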
sql/rpl_injector.cc

@@ -25,7 +25,7 @@

 /* inline since it's called below */
-inline injector::transaction::transaction(MYSQL_LOG *log, THD *thd)
+inline injector::transaction::transaction(MYSQL_BIN_LOG *log, THD *thd)
   : m_state(START_STATE), m_thd(thd)
 {
   /*
sql/rpl_injector.h

@@ -26,7 +26,7 @@
 /* Forward declarations */
 class handler;
-class MYSQL_LOG;
+class MYSQL_BIN_LOG;
 class st_table;

 typedef st_table TABLE;

@@ -219,7 +219,7 @@ public:
   private:
     /* Only the injector may construct these object */
-    transaction(MYSQL_LOG *, THD *);
+    transaction(MYSQL_BIN_LOG *, THD *);

     void swap(transaction& o) {
       /* std::swap(m_start_pos, o.m_start_pos); */
sql/rpl_rli.h

@@ -69,7 +69,7 @@ typedef struct st_relay_log_info
     Protected with internal locks.
     Must get data_lock when resetting the logs.
   */
-  MYSQL_LOG relay_log;
+  MYSQL_BIN_LOG relay_log;
   LOG_INFO linfo;
   IO_CACHE cache_buf, *cur_log;
sql/slave.cc

@@ -4658,7 +4658,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
         When the relay log is created when the I/O thread starts, easy: the
         master will send the description event and we will queue it.
         But if the relay log is created by new_file(): then the solution is:
-        MYSQL_LOG::open() will write the buffered description event.
+        MYSQL_BIN_LOG::open() will write the buffered description event.
       */
       if ((ev=Log_event::read_log_event(cur_log, 0,
                                         rli->relay_log.description_event_for_exec)))

@@ -4920,7 +4920,8 @@ err:
   Rotate a relay log (this is used only by FLUSH LOGS; the automatic rotation
   because of size is simpler because when we do it we already have all relevant
   locks; here we don't, so this function is mainly taking locks).
-  Returns nothing as we cannot catch any error (MYSQL_LOG::new_file() is void).
+  Returns nothing as we cannot catch any error (MYSQL_BIN_LOG::new_file()
+  is void).
 */
 void rotate_relay_log(MASTER_INFO* mi)

@@ -4942,7 +4943,7 @@ void rotate_relay_log(MASTER_INFO* mi)
   }

   /* If the relay log is closed, new_file() will do nothing. */
-  rli->relay_log.new_file(1);
+  rli->relay_log.new_file();
   /*
     We harvest now, because otherwise BIN_LOG_HEADER_SIZE will not immediately
sql/slave.h
...
@@ -73,7 +73,7 @@
   run_lock protects all information about the run state: slave_running, and the
   existence of the I/O thread (to stop/start it, you need this mutex).
   data_lock protects some moving members of the struct: counters (log name,
-  position) and relay log (MYSQL_LOG object).
+  position) and relay log (MYSQL_BIN_LOG object).

   In RELAY_LOG_INFO: run_lock, data_lock
   see MASTER_INFO
...
@@ -81,7 +81,7 @@
   Order of acquisition: if you want to have LOCK_active_mi and a run_lock, you
   must acquire LOCK_active_mi first.

-  In MYSQL_LOG: LOCK_log, LOCK_index of the binlog and the relay log
+  In MYSQL_BIN_LOG: LOCK_log, LOCK_index of the binlog and the relay log
   LOCK_log: when you write to it. LOCK_index: when you create/delete a binlog
   (so that you have to update the .index file).
 */
...
...
storage/csv/ha_tina.cc
...
@@ -61,7 +61,7 @@ TODO:
 /* The file extension */
 #define CSV_EXT ".CSV"               // The data file
-#define CSN_EXT ".CSN"               // Files used during repair
+#define CSN_EXT ".CSN"               // Files used during repair and update
 #define CSM_EXT ".CSM"               // Meta file
...
...
@@ -77,6 +77,53 @@ static int tina_init= 0;
 static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root);
 static int tina_init_func();

+off_t Transparent_file::read_next()
+{
+  off_t bytes_read;
+
+  /*
+     No need to seek here, as the file managed by Transparent_file class
+     always points to upper_bound byte
+  */
+  if ((bytes_read= my_read(filedes, buff, buff_size, MYF(0))) == MY_FILE_ERROR)
+    return -1;
+
+  /* end of file */
+  if (!bytes_read)
+    return -1;
+
+  lower_bound= upper_bound;
+  upper_bound+= bytes_read;
+
+  return lower_bound;
+}
+
+
+char Transparent_file::get_value(off_t offset)
+{
+  off_t bytes_read;
+
+  /* check boundaries */
+  if ((lower_bound <= offset) && (offset < upper_bound))
+    return buff[offset - lower_bound];
+  else
+  {
+    VOID(my_seek(filedes, offset, MY_SEEK_SET, MYF(0)));
+    /* read appropriate portion of the file */
+    if ((bytes_read= my_read(filedes, buff, buff_size, MYF(0))) == MY_FILE_ERROR)
+      return 0;
+
+    lower_bound= offset;
+    upper_bound= lower_bound + bytes_read;
+
+    /* end of file */
+    if (upper_bound == offset)
+      return 0;
+
+    return buff[0];
+  }
+}
+
 handlerton tina_hton;

/*****************************************************************************
...
...
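The new Transparent_file pair above replaces the mmapped region with a small in-memory window over the data file: get_value() serves a byte from the current window when the offset falls inside it and re-reads the window around the offset otherwise, while read_next() slides the window forward for sequential passes. A minimal standalone sketch of the same idea (hypothetical illustration only, using plain read()/lseek() instead of the mysys wrappers; not part of this commit):

  /* windowed_file_sketch.cc -- illustrative only, not MySQL code */
  #include <unistd.h>
  #include <vector>

  class WindowedFile {
    int fd;
    std::vector<char> buf;
    off_t lo, hi;                              /* window covers [lo, hi) */
  public:
    WindowedFile(int fd_arg, size_t window) : fd(fd_arg), buf(window), lo(0)
    {
      ssize_t n= read(fd, buf.data(), buf.size());
      hi= (n > 0) ? n : 0;
    }

    /* byte at an absolute offset; 0 means "nothing readable there" */
    char at(off_t offset)
    {
      if (offset >= lo && offset < hi)
        return buf[offset - lo];               /* hit: serve from the window */
      lseek(fd, offset, SEEK_SET);             /* miss: move the window */
      ssize_t n= read(fd, buf.data(), buf.size());
      if (n <= 0)
        return 0;
      lo= offset;
      hi= lo + n;
      return buf[0];
    }

    /* slide the window forward; -1 signals end of file */
    off_t next()
    {
      ssize_t n= read(fd, buf.data(), buf.size());
      if (n <= 0)
        return -1;
      lo= hi;
      hi+= n;
      return lo;
    }
  };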
@@ -92,7 +139,7 @@ int sort_set (tina_set *a, tina_set *b)
     We assume that intervals do not intersect. So, it is enought to compare
     any two points. Here we take start of intervals for comparison.
   */
-  return ( a->begin > b->begin ? -1 : ( a->begin < b->begin ? 1 : 0 ) );
+  return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) );
 }

 static byte* tina_get_key(TINA_SHARE *share,uint *length,
...
...
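With the sign of the comparison flipped, sort_set() now orders the chain of holes by ascending begin offset, which matches the front-to-back walk of the chain in the rewritten rnd_end() later in this patch. A hypothetical minimal illustration of the comparator's effect (not part of the commit):

  tina_set chain[]= { {30, 40}, {5, 10}, {15, 20} };   /* unsorted holes */
  qsort(chain, 3, sizeof(tina_set), (qsort_cmp) sort_set);
  /* chain is now {5,10}, {15,20}, {30,40}: holes appear in file order */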
@@ -102,55 +149,6 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length,
   return (byte*) share->table_name;
 }

-/*
-  Reloads the mmap file.
-*/
-int get_mmap(TINA_SHARE *share, int write)
-{
-  DBUG_ENTER("ha_tina::get_mmap");
-#ifdef __NETWARE__
-  my_message(errno, "Sorry, no mmap() on Netware", 0);
-  DBUG_ASSERT(0);
-  DBUG_RETURN(1);
-#else
-  if (share->mapped_file && my_munmap(share->mapped_file,
-                                      share->file_stat.st_size))
-    DBUG_RETURN(1);
-
-  if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
-    DBUG_RETURN(1);
-
-  if (share->file_stat.st_size)
-  {
-    if (write)
-      share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
-                                          PROT_READ|PROT_WRITE, MAP_SHARED,
-                                          share->data_file, 0);
-    else
-      share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
-                                          PROT_READ, MAP_PRIVATE,
-                                          share->data_file, 0);
-    if ((share->mapped_file == MAP_FAILED))
-    {
-      /*
-        Bad idea you think? See the problem is that nothing actually checks
-        the return value of ::rnd_init(), so tossing an error is about
-        it for us.
-        Never going to happen right? :)
-      */
-      my_message(errno, "Woops, blew up opening a mapped file", 0);
-      DBUG_ASSERT(0);
-      DBUG_RETURN(1);
-    }
-  }
-  else
-    share->mapped_file= NULL;
-
-  DBUG_RETURN(0);
-#endif /* __NETWARE__ */
-}
-
 static int tina_init_func()
 {
   if (!tina_init)
...
...
@@ -191,6 +189,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
 {
   TINA_SHARE *share;
   char meta_file_name[FN_REFLEN];
+  MY_STAT file_stat;                /* Stat information for the data file */
   char *tmp_name;
   uint length;
...
...
@@ -223,6 +222,8 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
   share->table_name= tmp_name;
   share->crashed= FALSE;
   share->rows_recorded= 0;
+  share->update_file_opened= FALSE;
+  share->tina_write_opened= FALSE;
   strmov(share->table_name, table_name);
   fn_format(share->data_file_name, table_name, "", CSV_EXT,
             MY_REPLACE_EXT|MY_UNPACK_FILENAME);
...
...
@@ -244,33 +245,21 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
       share->crashed= TRUE;

    /*
-      After we read, we set the file to dirty. When we close, we will do the
-      opposite. If the meta file will not open we assume it is crashed and
+      If the meta file will not open we assume it is crashed and
       mark it as such.
    */
    if (read_meta_file(share->meta_file, &share->rows_recorded))
      share->crashed= TRUE;
-    else
-      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);

-    if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
-                                   MYF(0))) == -1)
+    if (my_stat(share->data_file_name, &file_stat, MYF(MY_WME)) == NULL)
      goto error2;
-    share->mapped_file= NULL; // We don't know the state as we just allocated it
-    if (get_mmap(share, 0) > 0)
-      goto error3;

    /* init file length value used by readers */
-    share->saved_data_file_length= share->file_stat.st_size;
+    share->saved_data_file_length= file_stat.st_size;
  }
  share->use_count++;
  pthread_mutex_unlock(&tina_mutex);

  return share;

-error3:
-  my_close(share->data_file, MYF(0));
error2:
  thr_lock_delete(&share->lock);
  pthread_mutex_destroy(&share->mutex);
...
...
@@ -398,6 +387,30 @@ bool ha_tina::check_and_repair(THD *thd)
 }

+int ha_tina::init_tina_writer()
+{
+  DBUG_ENTER("ha_tina::init_tina_writer");
+
+  /*
+    Mark the file as crashed. We will set the flag back when we close
+    the file. In the case of the crash it will remain marked crashed,
+    which enforce recovery.
+  */
+  (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+
+  if ((share->tina_write_filedes=
+         my_open(share->data_file_name, O_RDWR|O_APPEND, MYF(0))) == -1)
+  {
+    DBUG_PRINT("info", ("Could not open tina file writes"));
+    share->crashed= TRUE;
+    DBUG_RETURN(1);
+  }
+  share->tina_write_opened= TRUE;
+
+  DBUG_RETURN(0);
+}
+
+
 bool ha_tina::is_crashed() const
 {
   DBUG_ENTER("ha_tina::is_crashed");
...
...
@@ -418,10 +431,13 @@ static int free_share(TINA_SHARE *share)
                           share->crashed ? TRUE :FALSE);
     if (my_close(share->meta_file, MYF(0)))
       result_code= 1;
-    if (share->mapped_file)
-      my_munmap(share->mapped_file, share->file_stat.st_size);
-    share->mapped_file= NULL;
-    result_code= my_close(share->data_file, MYF(0));
+    if (share->tina_write_opened)
+    {
+      if (my_close(share->tina_write_filedes, MYF(0)))
+        result_code= 1;
+      share->tina_write_opened= FALSE;
+    }
+
     hash_delete(&tina_open_tables, (byte*) share);
     thr_lock_delete(&share->lock);
     pthread_mutex_destroy(&share->mutex);
...
...
@@ -437,30 +453,41 @@ int tina_end(ha_panic_function type)
   return tina_done_func();
 }

 /*
-  Finds the end of a line.
-  Supports DOS, Unix, or Mac OS line endings.
+  This function finds the end of a line and returns the length
+  of the line ending.
+
+  We support three kinds of line endings:
+  '\r'     --  Old Mac OS line ending
+  '\n'     --  Traditional Unix and Mac OS X line ending
+  '\r''\n' --  DOS\Windows line ending
 */
-byte * find_eoln(byte *data, off_t begin, off_t end, int *eoln_len)
+off_t find_eoln_buff(Transparent_file *data_buff, off_t begin,
+                     off_t end, int *eoln_len)
 {
-  off_t dataend= begin;
   *eoln_len= 0;

   for (off_t x= begin; x < end; x++)
-    if (data[x] == '\r' || data[x] == '\n')
-      (*eoln_len)++;
-    else
-      if (!(*eoln_len))
-        dataend++;
-      else
-        return data + dataend;
+  {
+    /* Unix (includes Mac OS X) */
+    if (data_buff->get_value(x) == '\n')
+      *eoln_len= 1;
+    else
+      if (data_buff->get_value(x) == '\r') // Mac or Dos
+      {
+        /* old Mac line ending */
+        if (x + 1 == end || (data_buff->get_value(x + 1) != '\n'))
+          *eoln_len= 1;
+        else // DOS style ending
+          *eoln_len= 2;
+      }

-  /*
-    if we only have one record in the file then our for loop will break
-    before we return. we should still have seen end of line markers and
-    so we just return the line here
-  */
-  if (*eoln_len > 0)
-    return data + dataend;
+    if (*eoln_len)  // end of line was found
+      return x;
+  }

   return 0;
 }
...
...
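The rewritten comment spells out the three supported line endings. A hypothetical standalone equivalent of the same detection logic, operating on a plain in-memory buffer instead of a Transparent_file (illustrative only; it returns -1 rather than 0 when no line ending is found):

  /* Offset of the end-of-line marker in buf[begin..end), or -1 if none.
     Sets *eoln_len to 1 for '\n' or a lone '\r', and to 2 for "\r\n". */
  static long find_eoln_in_buf(const char *buf, long begin, long end,
                               int *eoln_len)
  {
    for (long x= begin; x < end; x++)
    {
      if (buf[x] == '\n')                      /* Unix / Mac OS X */
      {
        *eoln_len= 1;
        return x;
      }
      if (buf[x] == '\r')                      /* old Mac or DOS */
      {
        *eoln_len= (x + 1 < end && buf[x + 1] == '\n') ? 2 : 1;
        return x;
      }
    }
    *eoln_len= 0;
    return -1;
  }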
@@ -478,12 +505,13 @@ ha_tina::ha_tina(TABLE_SHARE *table_arg)
     They are not probably completely right.
   */
   current_position(0), next_position(0), local_saved_data_file_length(0),
-  chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
+  file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
   records_is_known(0)
 {
   /* Set our original buffers from pre-allocated memory */
   buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info);
   chain= chain_buffer;
+  file_buff= new Transparent_file();
 }
...
...
@@ -609,20 +637,18 @@ int ha_tina::chain_append()
 */
 int ha_tina::find_current_row(byte *buf)
 {
-  byte *mapped_ptr;
-  byte *end_ptr;
+  off_t end_offset, curr_offset= current_position;
   int eoln_len;
   my_bitmap_map *org_bitmap;
   DBUG_ENTER("ha_tina::find_current_row");

-  mapped_ptr= (byte *)share->mapped_file + current_position;
-
   /*
     We do not read further then local_saved_data_file_length in order
     not to conflict with undergoing concurrent insert.
   */
-  if ((end_ptr= find_eoln(share->mapped_file, current_position,
-                          local_saved_data_file_length, &eoln_len)) == 0)
+  if ((end_offset= find_eoln_buff(file_buff, current_position,
+                                  local_saved_data_file_length, &eoln_len)) == 0)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

 /* Avoid asserts in ::store() for columns that are not going to be updated */
...
...
@@ -631,36 +657,39 @@ int ha_tina::find_current_row(byte *buf)
   for (Field **field=table->field ; *field ; field++)
   {
     buffer.length(0);
-    if (*mapped_ptr == '"')
-      mapped_ptr++; // Increment past the first quote
+    if (file_buff->get_value(curr_offset) == '"')
+      curr_offset++; // Increment past the first quote
     else
     {
       dbug_tmp_restore_column_map(table->write_set, org_bitmap);
       DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
     }
-    for(;mapped_ptr != end_ptr; mapped_ptr++)
+    for(;curr_offset != end_offset; curr_offset++)
     {
       // Need to convert line feeds!
-      if (*mapped_ptr == '"' &&
-          (((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) ||
-           (mapped_ptr == end_ptr - 1)))
+      if (file_buff->get_value(curr_offset) == '"' &&
+          (((file_buff->get_value(curr_offset + 1) == ',') &&
            (file_buff->get_value(curr_offset + 2) == '"')) ||
+           (curr_offset == end_offset - 1)))
       {
-        mapped_ptr += 2; // Move past the , and the "
+        curr_offset+= 2; // Move past the , and the "
         break;
       }
-      if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1))
+      if (file_buff->get_value(curr_offset) == '\\' &&
+          curr_offset != (end_offset - 1))
       {
-        mapped_ptr++;
-        if (*mapped_ptr == 'r')
+        curr_offset++;
+        if (file_buff->get_value(curr_offset) == 'r')
           buffer.append('\r');
-        else if (*mapped_ptr == 'n' )
+        else if (file_buff->get_value(curr_offset) == 'n' )
           buffer.append('\n');
-        else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"'))
-          buffer.append(*mapped_ptr);
+        else if ((file_buff->get_value(curr_offset) == '\\') ||
+                 (file_buff->get_value(curr_offset) == '"'))
+          buffer.append(file_buff->get_value(curr_offset));
         else  /* This could only happed with an externally created file */
         {
           buffer.append('\\');
-          buffer.append(*mapped_ptr);
+          buffer.append(file_buff->get_value(curr_offset));
         }
       }
       else // ordinary symbol
...
...
@@ -669,18 +698,18 @@ int ha_tina::find_current_row(byte *buf)
           We are at final symbol and no last quote was found =>
           we are working with a damaged file.
         */
-        if (mapped_ptr == end_ptr - 1)
+        if (curr_offset == end_offset - 1)
         {
           dbug_tmp_restore_column_map(table->write_set, org_bitmap);
           DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
         }
-        buffer.append(*mapped_ptr);
+        buffer.append(file_buff->get_value(curr_offset));
       }
     }
     if (bitmap_is_set(table->read_set, (*field)->field_index))
      (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
   }
-  next_position= (end_ptr - share->mapped_file) + eoln_len;
+  next_position= end_offset + eoln_len;
   /* Maybe use \N for null? */
   memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
   dbug_tmp_restore_column_map(table->write_set, org_bitmap);
...
...
@@ -779,7 +808,7 @@ void ha_tina::get_status()
 void ha_tina::update_status()
 {
   /* correct local_saved_data_file_length for writers */
-  share->saved_data_file_length= share->file_stat.st_size;
+  share->saved_data_file_length= local_saved_data_file_length;
 }
...
...
@@ -830,6 +859,9 @@ int ha_tina::open(const char *name, int mode, uint open_options)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   }

+  if ((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1)
+    DBUG_RETURN(0);
+
   /*
     Init locking. Pass handler object to the locking routines,
     so that they could save/update local_saved_data_file_length value
...
...
@@ -848,12 +880,14 @@ int ha_tina::open(const char *name, int mode, uint open_options)
 /*
   Close a database file. We remove ourselves from the shared strucutre.
-  If it is empty we destroy it and free the mapped file.
+  If it is empty we destroy it.
 */
 int ha_tina::close(void)
 {
+  int rc= 0;
   DBUG_ENTER("ha_tina::close");
-  DBUG_RETURN(free_share(share));
+  rc= my_close(data_file, MYF(0));
+  DBUG_RETURN(free_share(share) || rc);
 }

 /*
...
...
@@ -876,22 +910,17 @@ int ha_tina::write_row(byte * buf)
   size= encode_quote(buf);

-  if (my_write(share->data_file, (byte*)buffer.ptr(), size,
-               MYF(MY_WME | MY_NABP)))
-    DBUG_RETURN(-1);
+  if (!share->tina_write_opened)
+    if (init_tina_writer())
+      DBUG_RETURN(-1);

-  /*
-    Ok, this is means that we will be doing potentially bad things
-    during a bulk insert on some OS'es. What we need is a cleanup
-    call for ::write_row that would let us fix up everything after the bulk
-    insert. The archive handler does this with an extra mutx call, which
-    might be a solution for this.
-  */
-  if (get_mmap(share, 0) > 0)
+   /* use pwrite, as concurrent reader could have changed the position */
+  if (my_write(share->tina_write_filedes, (byte*)buffer.ptr(), size,
+               MYF(MY_WME | MY_NABP)))
     DBUG_RETURN(-1);

   /* update local copy of the max position to see our own changes */
-  local_saved_data_file_length= share->file_stat.st_size;
+  local_saved_data_file_length+= size;

   /* update shared info */
   pthread_mutex_lock(&share->mutex);
...
...
@@ -906,6 +935,23 @@ int ha_tina::write_row(byte * buf)
 }

+int ha_tina::open_update_temp_file_if_needed()
+{
+  char updated_fname[FN_REFLEN];
+
+  if (!share->update_file_opened)
+  {
+    if ((update_temp_file=
+           my_create(fn_format(updated_fname, share->table_name,
+                               "", CSN_EXT,
+                               MY_REPLACE_EXT | MY_UNPACK_FILENAME),
+                     0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
+      return 1;
+    share->update_file_opened= TRUE;
+  }
+  return 0;
+}
+
 /*
   This is called for an update.
   Make sure you put in code to increment the auto increment, also
...
...
@@ -929,16 +975,16 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
   if (chain_append())
     DBUG_RETURN(-1);

-  if (my_write(share->data_file, (byte*)buffer.ptr(), size,
+  if (open_update_temp_file_if_needed())
+    DBUG_RETURN(-1);
+
+  if (my_write(update_temp_file, (byte*)buffer.ptr(), size,
                MYF(MY_WME | MY_NABP)))
     DBUG_RETURN(-1);

   /* UPDATE should never happen on the log tables */
   DBUG_ASSERT(!share->is_log_table);

-  /* update local copy of the max position to see our own changes */
-  local_saved_data_file_length= share->file_stat.st_size;
-
   DBUG_RETURN(0);
 }
...
...
@@ -1004,6 +1050,8 @@ int ha_tina::rnd_init(bool scan)
 {
   DBUG_ENTER("ha_tina::rnd_init");

+  /* set buffer to the beginning of the file */
+  file_buff->init_buff(data_file);
+
   if (share->crashed)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
...
...
@@ -1011,11 +1059,6 @@ int ha_tina::rnd_init(bool scan)
   stats.records= 0;
   records_is_known= 0;
   chain_ptr= chain;
-#ifdef HAVE_MADVISE
-  if (scan)
-    (void) madvise(share->mapped_file, share->file_stat.st_size,
-                   MADV_SEQUENTIAL);
-#endif

   DBUG_RETURN(0);
 }
...
...
@@ -1045,8 +1088,11 @@ int ha_tina::rnd_next(byte *buf)
   ha_statistic_increment(&SSV::ha_read_rnd_next_count);

   current_position= next_position;
-  if (!share->mapped_file)
+
+  /* don't scan an empty file */
+  if (!local_saved_data_file_length)
     DBUG_RETURN(HA_ERR_END_OF_FILE);
+
   if ((rc= find_current_row(buf)))
     DBUG_RETURN(rc);
...
...
@@ -1115,6 +1161,22 @@ int ha_tina::extra(enum ha_extra_function operation)
   DBUG_RETURN(0);
 }

+
+/*
+  Set end_pos to the last valid byte of continuous area, closest
+  to the given "hole", stored in the buffer. "Valid" here means,
+  not listed in the chain of deleted records ("holes").
+*/
+bool ha_tina::get_write_pos(off_t *end_pos, tina_set *closest_hole)
+{
+  if (closest_hole == chain_ptr) /* no more chains */
+    *end_pos= file_buff->end();
+  else
+    *end_pos= min(file_buff->end(),
+                  closest_hole->begin);
+  return (closest_hole != chain_ptr) && (*end_pos == closest_hole->begin);
+}
+
+
 /*
   Called after each table scan. In particular after deletes,
   and updates. In the last case we employ chain of deleted
...
...
@@ -1123,53 +1185,107 @@ int ha_tina::extra(enum ha_extra_function operation)
 */
 int ha_tina::rnd_end()
 {
+  char updated_fname[FN_REFLEN];
+  off_t file_buffer_start= 0;
   DBUG_ENTER("ha_tina::rnd_end");

   records_is_known= 1;

-  /* First position will be truncate position, second will be increment */
   if ((chain_ptr - chain) > 0)
   {
-    tina_set *ptr;
-    size_t length;
+    tina_set *ptr= chain;

     /*
-      Setting up writable map, this will contain all of the data after the
-      get_mmap call that we have added to the file.
+      Re-read the beginning of a file (as the buffer should point to the
+      end of file after the scan).
     */
-    if (get_mmap(share, 1) > 0)
-      DBUG_RETURN(-1);
-    length= share->file_stat.st_size;
+    file_buff->init_buff(data_file);

     /*
-      The sort handles updates/deletes with random orders.
-      It also sorts so that we move the final blocks to the
-      beginning so that we move the smallest amount of data possible.
+      The sort is needed when there were updates/deletes with random orders.
+      It sorts so that we move the firts blocks to the beginning.
     */
     qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
           (qsort_cmp)sort_set);
-    for (ptr= chain; ptr < chain_ptr; ptr++)
+
+    off_t write_begin= 0, write_end;
+
+    /* create the file to write updated table if it wasn't yet created */
+    if (open_update_temp_file_if_needed())
+      DBUG_RETURN(-1);
+
+    /* write the file with updated info */
+    while ((file_buffer_start != -1))     // while not end of file
     {
-      memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
-              length - (size_t)ptr->end);
-      length= length - (size_t)(ptr->end - ptr->begin);
+      bool in_hole= get_write_pos(&write_end, ptr);
+
+      /* if there is something to write, write it */
+      if ((write_end - write_begin) &&
+          (my_write(update_temp_file,
+                    (byte*)(file_buff->ptr() +
+                            (write_begin - file_buff->start())),
+                    write_end - write_begin, MYF_RW)))
+        goto error;
+
+      if (in_hole)
+      {
+        /* skip hole */
+        while (file_buff->end() <= ptr->end && file_buffer_start != -1)
+          file_buffer_start= file_buff->read_next();
+        write_begin= ptr->end;
+        ptr++;
+      }
+      else
+        write_begin= write_end;
+
+      if (write_end == file_buff->end())
+        file_buffer_start= file_buff->read_next(); /* shift the buffer */
     }

-    /* Unmap the file before the new size is set */
-    if (my_munmap(share->mapped_file, share->file_stat.st_size))
+    if (my_sync(update_temp_file, MYF(MY_WME)) ||
+        my_close(update_temp_file, MYF(0)))
       DBUG_RETURN(-1);
-    /* We set it to null so that get_mmap() won't try to unmap it */
-    share->mapped_file= NULL;

-    /* Set the file to the new size */
-    if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
+    share->update_file_opened= FALSE;
+
+    if (share->tina_write_opened)
+    {
+      if (my_close(share->tina_write_filedes, MYF(0)))
+        DBUG_RETURN(-1);
+      /*
+        Mark that the writer fd is closed, so that init_tina_writer()
+        will reopen it later.
+      */
+      share->tina_write_opened= FALSE;
+    }
+
+    /*
+      Close opened fildes's. Then move updated file in place
+      of the old datafile.
+    */
+    if (my_close(data_file, MYF(0)) ||
+        my_rename(fn_format(updated_fname, share->table_name, "", CSN_EXT,
+                            MY_REPLACE_EXT | MY_UNPACK_FILENAME),
+                  share->data_file_name, MYF(0)))
       DBUG_RETURN(-1);

-    if (get_mmap(share, 0) > 0)
+    /* Open the file again */
+    if (((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1))
       DBUG_RETURN(-1);
+    /*
+      The datafile is consistent at this point and the write filedes is
+      closed, so nothing worrying will happen to it in case of a crash.
+      Here we record this fact to the meta-file.
+    */
+    (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
   }
   DBUG_RETURN(0);
+error:
+  my_close(update_temp_file, MYF(0));
+  share->update_file_opened= FALSE;
+  DBUG_RETURN(-1);
 }
...
...
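The new rnd_end() streams the old datafile into the .CSN temp file, skipping the sorted chain of deleted/updated intervals, and then renames the temp file over the original. A self-contained sketch of the same "copy everything except the holes" idea on plain buffers (hypothetical illustration, not the handler code; assumes the holes are sorted, non-overlapping and within bounds):

  #include <string>
  #include <vector>

  struct Hole { size_t begin, end; };          /* drop bytes in [begin, end) */

  static std::string drop_holes(const std::string &src,
                                const std::vector<Hole> &holes)
  {
    std::string out;
    size_t write_begin= 0;
    for (const Hole &h : holes)
    {
      out.append(src, write_begin, h.begin - write_begin);  /* keep this run */
      write_begin= h.end;                                    /* skip the hole */
    }
    out.append(src, write_begin, src.size() - write_begin);  /* trailing run */
    return out;
  }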
@@ -1198,10 +1314,11 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
   File repair_file;
   int rc;
   ha_rows rows_repaired= 0;
+  off_t write_begin= 0, write_end;
   DBUG_ENTER("ha_tina::repair");

   /* empty file */
-  if (!share->mapped_file)
+  if (!share->saved_data_file_length)
   {
     share->rows_recorded= 0;
     goto end;
...
...
@@ -1212,12 +1329,15 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
   if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
     DBUG_RETURN(HA_ERR_OUT_OF_MEM);

+  /* position buffer to the start of the file */
+  file_buff->init_buff(data_file);
+
   /*
     Local_saved_data_file_length is initialized during the lock phase.
     Sometimes this is not getting executed before ::repair (e.g. for
     the log tables). We set it manually here.
   */
-  local_saved_data_file_length= share->file_stat.st_size;
+  local_saved_data_file_length= share->saved_data_file_length;
   /* set current position to the beginning of the file */
   current_position= next_position= 0;
...
...
@@ -1232,11 +1352,10 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
   if (rc == HA_ERR_END_OF_FILE)
   {
-    /* All rows were read ok until end of file, the file does not need repair. */
     /*
-      If rows_recorded != rows_repaired, we should update
-      rows_recorded value to the current amount of rows.
+      All rows were read ok until end of file, the file does not need repair.
+      If rows_recorded != rows_repaired, we should update rows_recorded value
+      to the current amount of rows.
     */
     share->rows_recorded= rows_repaired;
     goto end;
...
...
@@ -1252,36 +1371,45 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
                              0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
     DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);

-  if (my_write(repair_file, (byte*)share->mapped_file, current_position,
-               MYF(MY_NABP)))
-    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-  my_close(repair_file, MYF(0));
+  file_buff->init_buff(data_file);

   /* we just truncated the file up to the first bad row. update rows count. */
   share->rows_recorded= rows_repaired;

-  if (my_munmap(share->mapped_file, share->file_stat.st_size))
-    DBUG_RETURN(-1);
-  /* We set it to null so that get_mmap() won't try to unmap it */
-  share->mapped_file= NULL;
+  /* write repaired file */
+  while (1)
+  {
+    write_end= min(file_buff->end(), current_position);
+    if ((write_end - write_begin) &&
+        (my_write(repair_file, (byte*)file_buff->ptr(),
+                  write_end - write_begin, MYF_RW)))
+      DBUG_RETURN(-1);
+
+    write_begin= write_end;
+    if (write_end == current_position)
+      break;
+    else
+      file_buff->read_next(); /* shift the buffer */
+  }

   /*
-    Close the "to"-file before renaming
-    On Windows one cannot rename a file, which descriptor
-    is still open. EACCES will be returned when trying to delete
-    the "to"-file in my_rename()
+    Close the files and rename repaired file to the datafile.
+    We have to close the files, as on Windows one cannot rename
+    a file, which descriptor is still open. EACCES will be returned
+    when trying to delete the "to"-file in my_rename().
   */
-  my_close(share->data_file, MYF(0));
-  if (my_rename(repaired_fname, share->data_file_name, MYF(0)))
+  if (my_close(data_file, MYF(0)) || my_close(repair_file, MYF(0)) ||
+      my_rename(repaired_fname, share->data_file_name, MYF(0)))
     DBUG_RETURN(-1);

   /* Open the file again, it should now be repaired */
-  if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
-                                 MYF(0))) == -1)
+  if ((data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
+                          MYF(0))) == -1)
     DBUG_RETURN(-1);

-  if (get_mmap(share, 0) > 0)
-    DBUG_RETURN(-1);
+  /* Set new file size. The file size will be updated by ::update_status() */
+  local_saved_data_file_length= (size_t) current_position;

 end:
   share->crashed= FALSE;
...
...
@@ -1300,17 +1428,12 @@ int ha_tina::delete_all_rows()
   if (!records_is_known)
     DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);

-  /* Unmap the file before the new size is set */
-  if (share->mapped_file && my_munmap(share->mapped_file,
-                                      share->file_stat.st_size))
-    DBUG_RETURN(-1);
-  share->mapped_file= NULL;
+  if (!share->tina_write_opened)
+    if (init_tina_writer())
+      DBUG_RETURN(-1);

   /* Truncate the file to zero size */
-  rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
-
-  if (get_mmap(share, 0) > 0)
-    DBUG_RETURN(-1);
+  rc= my_chsize(share->tina_write_filedes, 0, 0, MYF(MY_WME));

   stats.records= 0;
   DBUG_RETURN(rc);
...
...
@@ -1372,12 +1495,15 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
   if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
     DBUG_RETURN(HA_ERR_OUT_OF_MEM);

+  /* position buffer to the start of the file */
+  file_buff->init_buff(data_file);
+
   /*
     Local_saved_data_file_length is initialized during the lock phase.
     Check does not use store_lock in certain cases. So, we set it
     manually here.
   */
-  local_saved_data_file_length= share->file_stat.st_size;
+  local_saved_data_file_length= share->saved_data_file_length;
   /* set current position to the beginning of the file */
   current_position= next_position= 0;
   /* Read the file row-by-row. If everything is ok, repair is not needed. */
...
...
storage/csv/ha_tina.h
...
@@ -29,15 +29,12 @@
 typedef struct st_tina_share {
   char *table_name;
   char data_file_name[FN_REFLEN];
-  byte *mapped_file;                /* mapped region of file */
   uint table_name_length, use_count;
   /*
     Below flag is needed to make log tables work with concurrent insert.
     For more details see comment to ha_tina::update_status.
   */
   my_bool is_log_table;
-  MY_STAT file_stat;                /* Stat information for the data file */
-  File data_file;                   /* Current open data file */
   /*
     Here we save the length of the file for readers. This is updated by
     inserts, updates and deletes. The var is initialized along with the
...
...
@@ -46,7 +43,10 @@ typedef struct st_tina_share {
   off_t saved_data_file_length;
   pthread_mutex_t mutex;
   THR_LOCK lock;
+  bool update_file_opened;
+  bool tina_write_opened;
   File meta_file;                   /* Meta file we use */
+  File tina_write_filedes;          /* File handler for readers */
   bool crashed;                     /* Meta file is crashed */
   ha_rows rows_recorded;            /* Number of rows in tables */
 } TINA_SHARE;
...
...
@@ -56,6 +56,49 @@ struct tina_set {
   off_t end;
 };

+class Transparent_file
+{
+  File filedes;
+  byte *buff;  /* in-memory window to the file or mmaped area */
+  /* current window sizes */
+  off_t lower_bound;
+  off_t upper_bound;
+  uint buff_size;
+
+public:
+
+  Transparent_file() : lower_bound(0), buff_size(IO_SIZE)
+  { buff= (byte *) my_malloc(buff_size*sizeof(byte), MYF(MY_WME)); }
+
+  ~Transparent_file()
+  { my_free((gptr)buff, MYF(MY_ALLOW_ZERO_PTR)); }
+
+  void init_buff(File filedes_arg)
+  {
+    filedes= filedes_arg;
+    /* read the beginning of the file */
+    lower_bound= 0;
+    VOID(my_seek(filedes, 0, MY_SEEK_SET, MYF(0)));
+    if (filedes && buff)
+      upper_bound= my_read(filedes, buff, buff_size, MYF(0));
+  }
+
+  byte *ptr()
+  { return buff; }
+
+  off_t start()
+  { return lower_bound; }
+
+  off_t end()
+  { return upper_bound; }
+
+  /* get a char from the given position in the file */
+  char get_value(off_t offset);
+  /* shift a buffer windows to see the next part of the file */
+  off_t read_next();
+};
+
 class ha_tina: public handler
 {
   THR_LOCK_DATA lock;      /* MySQL lock */
...
...
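A sketch of how the handler is expected to drive this class during a scan (hypothetical usage; fd, current_position and wanted_offset are placeholders for an already-open data file descriptor and scan positions, not identifiers from this header):

  Transparent_file *file_buff= new Transparent_file();
  file_buff->init_buff(fd);              /* window now covers the file start */

  char c= file_buff->get_value(current_position);  /* byte at any offset */

  /* slide the window forward until it covers wanted_offset or EOF is hit */
  while (file_buff->end() <= wanted_offset && file_buff->read_next() != -1)
    ;

  delete file_buff;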
@@ -64,6 +107,9 @@ class ha_tina: public handler
   off_t next_position;    /* Next position in the file scan */
   off_t local_saved_data_file_length; /* save position for reads */
   byte byte_buffer[IO_SIZE];
+  Transparent_file *file_buff;
+  File data_file;                   /* File handler for readers */
+  File update_temp_file;
   String buffer;
   /*
     The chain contains "holes" in the file, occured because of
...
...
@@ -77,12 +123,19 @@ class ha_tina: public handler
   uint32 chain_size;
   bool records_is_known;

+private:
+  bool get_write_pos(off_t *end_pos, tina_set *closest_hole);
+  int open_update_temp_file_if_needed();
+  int init_tina_writer();
+
 public:
   ha_tina(TABLE_SHARE *table_arg);
   ~ha_tina()
   {
     if (chain_alloced)
       my_free((gptr)chain, 0);
+    if (file_buff)
+      delete file_buff;
   }
   const char *table_type() const { return "CSV"; }
   const char *index_type(uint inx) { return "NONE"; }
...
...