Commit 161fea4a authored by cmiller@zippy.cornsilk.net

Merge bk-internal.mysql.com:/home/bk/mysql-5.1

into  zippy.cornsilk.net:/home/cmiller/work/mysql/mysql-5.1-maint
parents 60e178bb b9847507
......@@ -1242,6 +1242,7 @@ mysql-test/gmon.out
mysql-test/install_test_db
mysql-test/mtr
mysql-test/mysql-test-run
mysql-test/mysql-test-run-shell
mysql-test/mysql-test-run.log
mysql-test/mysql_test_run_new
mysql-test/ndb/ndbcluster
......
......@@ -22,11 +22,15 @@ BUILT_SOURCES= $(top_builddir)/include/mysqld_error.h \
$(top_builddir)/include/sql_state.h \
$(top_builddir)/include/mysqld_ername.h
pkginclude_HEADERS= $(BUILT_SOURCES)
CLEANFILES = $(BUILT_SOURCES)
DISTCLEANFILES = $(BUILT_SOURCES)
SUBDIRS = @yassl_dir@
# This will build mysqld_error.h and sql_state.h
$(top_builddir)/include/mysqld_error.h: comp_err$(EXEEXT)
# This will build mysqld_error.h, mysqld_ername.h and sql_state.h
# NOTE Built files should depend on their sources to avoid
# the built files being rebuilt in source dist
$(top_builddir)/include/mysqld_error.h: comp_err.c \
$(top_srcdir)/sql/share/errmsg.txt
$(MAKE) $(AM_MAKEFLAGS) comp_err$(EXEEXT)
$(top_builddir)/extra/comp_err$(EXEEXT) \
--charset=$(top_srcdir)/sql/share/charsets \
--out-dir=$(top_builddir)/sql/share/ \
......@@ -37,9 +41,10 @@ $(top_builddir)/include/mysqld_error.h: comp_err$(EXEEXT)
$(top_builddir)/include/mysqld_ername.h: $(top_builddir)/include/mysqld_error.h
$(top_builddir)/include/sql_state.h: $(top_builddir)/include/mysqld_error.h
bin_PROGRAMS = replace comp_err perror resolveip my_print_defaults \
bin_PROGRAMS = replace perror resolveip my_print_defaults \
resolve_stack_dump mysql_waitpid innochecksum
noinst_PROGRAMS = charset2html
EXTRA_PROGRAMS = comp_err
EXTRA_DIST = CMakeLists.txt
perror.o: perror.c
......
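The NOTE in the extra/Makefile.am hunk above describes the general pattern: a generated header should depend on the data it is generated from, and the generator tool is only built on demand inside the rule, so an unpacked source distribution does not regenerate files it already ships. A minimal sketch of that pattern in automake, with purely illustrative file and tool names (not taken from this tree):

# generated.h is rebuilt only when its inputs change; the generator itself
# is built on demand through a recursive make invocation.
generated.h: generator.c data.txt
	$(MAKE) $(AM_MAKEFLAGS) generator$(EXEEXT)
	./generator$(EXEEXT) --in=data.txt --out=$@

(Recipe lines must be indented with a tab in a real Makefile.am.)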
......@@ -6,3 +6,7 @@ libyassl_la_SOURCES = buffer.cpp cert_wrapper.cpp crypto_wrapper.cpp \
template_instnt.cpp timer.cpp yassl_imp.cpp yassl_error.cpp yassl_int.cpp
EXTRA_DIST = $(wildcard ../include/*.hpp) $(wildcard ../include/openssl/*.h)
AM_CXXFLAGS = -DYASSL_PURE_C -DYASSL_PREFIX
# Don't update the files from bitkeeper
%::SCCS/s.%
INCLUDES = -I../include -I../../mySTL
bin_PROGRAMS = benchmark
benchmark_SOURCES = benchmark.cpp
benchmark_LDFLAGS = -L../src
benchmark_LDADD = -ltaocrypt
benchmark_LDADD = $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la
benchmark_CXXFLAGS = -DYASSL_PURE_C
benchmark_DEPENDENCIES = ../src/libtaocrypt.la
EXTRA_DIST = benchmark.dsp rsa1024.der dh1024.der dsa1024.der make.bat
......@@ -11,3 +11,7 @@ libtaocrypt_la_SOURCES = aes.cpp aestables.cpp algebra.cpp arc4.cpp \
libtaocrypt_la_CXXFLAGS = @yassl_taocrypt_extra_cxxflags@ -DYASSL_PURE_C
EXTRA_DIST = $(wildcard ../include/*.hpp)
# Don't update the files from bitkeeper
%::SCCS/s.%
INCLUDES = -I../include -I../../mySTL
bin_PROGRAMS = test
test_SOURCES = test.cpp
test_LDFLAGS = -L../src
test_LDADD = -ltaocrypt
test_DEPENDENCIES = ../src/libtaocrypt.la
test_LDADD = $(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la
test_CXXFLAGS = -DYASSL_PURE_C
EXTRA_DIST = make.bat
......@@ -4,8 +4,11 @@ testsuite_SOURCES = testsuite.cpp ../taocrypt/test/test.cpp \
../examples/client/client.cpp ../examples/server/server.cpp \
../examples/echoclient/echoclient.cpp \
../examples/echoserver/echoserver.cpp
testsuite_LDFLAGS = -L../src/ -L../taocrypt/src
testsuite_CXXFLAGS = -DYASSL_PURE_C -DYASSL_PREFIX -DNO_MAIN_DRIVER
testsuite_LDADD = -lyassl -ltaocrypt
testsuite_DEPENDENCIES = ../src/libyassl.la ../taocrypt/src/libtaocrypt.la
testsuite_LDADD = $(top_builddir)/extra/yassl/src/libyassl.la \
$(top_builddir)/extra/yassl/taocrypt/src/libtaocrypt.la
EXTRA_DIST = testsuite.dsp test.hpp input quit make.bat
# Don't update the files from bitkeeper
%::SCCS/s.%
......@@ -22,9 +22,10 @@ DIST_SUBDIRS=ndb
benchdir_root= $(prefix)
testdir = $(benchdir_root)/mysql-test
EXTRA_SCRIPTS = mysql-test-run.sh install_test_db.sh valgrind.supp $(PRESCRIPTS)
EXTRA_SCRIPTS = mysql-test-run-shell.sh install_test_db.sh \
valgrind.supp $(PRESCRIPTS)
EXTRA_DIST = $(EXTRA_SCRIPTS)
GENSCRIPTS = mysql-test-run install_test_db mtr
GENSCRIPTS = mysql-test-run-shell install_test_db mtr mysql-test-run
PRESCRIPTS = mysql-test-run.pl
test_SCRIPTS = $(GENSCRIPTS) $(PRESCRIPTS)
test_DATA = std_data/client-key.pem std_data/client-cert.pem \
......@@ -118,6 +119,11 @@ mtr:
$(RM) -f mtr
$(LN_S) mysql-test-run.pl mtr
# mysql-test-run - a shortcut for executing mysql-test-run.pl
mysql-test-run:
$(RM) -f mysql-test-run
$(LN_S) mysql-test-run.pl mysql-test-run
SUFFIXES = .sh
.sh:
......
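The mysql-test/Makefile.am hunk above keeps the old shell driver installed as mysql-test-run-shell and turns the plain mysql-test-run name into a generated symlink to mysql-test-run.pl, mirroring the existing mtr shortcut. The moving parts: the .sh sources stay in EXTRA_SCRIPTS (distributed via EXTRA_DIST), generated files are listed in GENSCRIPTS, and test_SCRIPTS controls what lands in the installed mysql-test directory. A hedged sketch of that arrangement, with illustrative names only:

EXTRA_SCRIPTS = run-tests-shell.sh
GENSCRIPTS    = run-tests-shell shortcut
PRESCRIPTS    = run-tests.pl
test_SCRIPTS  = $(GENSCRIPTS) $(PRESCRIPTS)

# "shortcut" is recreated on each build as a symlink to the real driver.
shortcut:
	$(RM) -f shortcut
	$(LN_S) run-tests.pl shortcut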
......@@ -4,12 +4,4 @@ disable_query_log;
show variables like "have_ndbcluster";
enable_query_log;
# Check that NDB is installed and known to be working
# This will disable ndb from the shell script 'mysql-test-run'
-- require r/have_ndb_status_ok.require
disable_query_log;
eval select "$NDB_STATUS_OK" as ndb_status_ok;
enable_query_log;
......@@ -538,8 +538,6 @@ sub mtr_options_from_test_file($$) {
while ( my $line= <$F> )
{
chomp;
next if ( $line !~ /^--/ );
# Match this line against tag in "tags" array
......
......@@ -476,14 +476,6 @@ sub mtr_kill_leftovers () {
mtr_debug("Got pid: $pid from file '$pidfile'");
# Race, could have been removed between I tested with -f
# and the unlink() below, so I better check again with -f
if ( ! unlink($pidfile) and -f $pidfile )
{
mtr_error("can't remove $pidfile");
}
if ( $::glob_cygwin_perl or kill(0, $pid) )
{
mtr_debug("There is process with pid $pid -- scheduling for kill.");
......
......@@ -905,13 +905,13 @@ sub command_line_setup () {
if ( ! $opt_testcase_timeout )
{
$opt_testcase_timeout= $default_testcase_timeout;
$opt_testcase_timeout*= 10 if defined $opt_valgrind;
$opt_testcase_timeout*= 10 if $opt_valgrind;
}
if ( ! $opt_suite_timeout )
{
$opt_suite_timeout= $default_suite_timeout;
$opt_suite_timeout*= 4 if defined $opt_valgrind;
$opt_suite_timeout*= 6 if $opt_valgrind;
}
# Increase times to wait for executables to start if using valgrind
......@@ -943,6 +943,7 @@ sub command_line_setup () {
$master->[0]=
{
pid => 0,
type => "master",
idx => 0,
path_myddir => "$opt_vardir/master-data",
......@@ -958,6 +959,7 @@ sub command_line_setup () {
$master->[1]=
{
pid => 0,
type => "master",
idx => 1,
path_myddir => "$opt_vardir/master1-data",
......@@ -973,6 +975,7 @@ sub command_line_setup () {
$slave->[0]=
{
pid => 0,
type => "slave",
idx => 0,
path_myddir => "$opt_vardir/slave-data",
......@@ -989,6 +992,7 @@ sub command_line_setup () {
$slave->[1]=
{
pid => 0,
type => "slave",
idx => 1,
path_myddir => "$opt_vardir/slave1-data",
......@@ -1004,6 +1008,7 @@ sub command_line_setup () {
$slave->[2]=
{
pid => 0,
type => "slave",
idx => 2,
path_myddir => "$opt_vardir/slave2-data",
......@@ -1064,7 +1069,7 @@ sub command_line_setup () {
connect_string => "$opt_ndbconnectstring",
path_pid => "$data_dir/ndb_3.pid", # Nodes + 1
pid => 0, # pid of ndb_mgmd
installed_ok => 'NO',
installed_ok => 0,
};
$data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port_slave";
......@@ -1077,7 +1082,7 @@ sub command_line_setup () {
connect_string => "$opt_ndbconnectstring_slave",
path_pid => "$data_dir/ndb_2.pid", # Nodes + 1
pid => 0, # pid of ndb_mgmd
installed_ok => 'NO',
installed_ok => 0,
};
# Init pids of ndbd's
......@@ -1172,9 +1177,9 @@ sub executable_setup () {
"$path_client_bindir/mysqld-nt",
"$path_client_bindir/mysqld",
"$path_client_bindir/mysqld-max",
"$path_client_bindir/mysqld-debug",
"$glob_basedir/sql/release/mysqld",
"$glob_basedir/sql/debug/mysqld");
"$path_client_bindir/mysqld-debug",
$path_language= mtr_path_exists("$glob_basedir/share/english/",
"$glob_basedir/sql/share/english/");
$path_charsetsdir= mtr_path_exists("$glob_basedir/share/charsets",
......@@ -1539,7 +1544,8 @@ sub environment_setup () {
my $cmdline_mysql=
"$exe_mysql --no-defaults --host=localhost --user=root --password= " .
"--port=$master->[0]->{'port'} " .
"--socket=$master->[0]->{'path_sock'}";
"--socket=$master->[0]->{'path_sock'} ".
"--character-sets-dir=$path_charsetsdir";
$ENV{'MYSQL'}= $cmdline_mysql;
......@@ -1662,7 +1668,7 @@ sub kill_running_server () {
{
# Ensure that no old mysqld test servers are running
# This is different from terminating processes we have
# started from ths run of the script, this is terminating
# started from this run of the script, this is terminating
# leftovers from previous runs.
mtr_report("Killing Possible Leftover Processes");
......@@ -2256,9 +2262,9 @@ sub mysql_install_db () {
if ( $use_slaves )
{
install_db('slave', $slave->[0]->{'path_myddir'});
install_db('slave', $slave->[1]->{'path_myddir'});
install_db('slave', $slave->[2]->{'path_myddir'});
install_db('slave1', $slave->[0]->{'path_myddir'});
install_db('slave2', $slave->[1]->{'path_myddir'});
install_db('slave3', $slave->[2]->{'path_myddir'});
}
if ( ! $opt_skip_im )
......@@ -2280,21 +2286,18 @@ sub mysql_install_db () {
next if !$cluster->{'pid'};
$cluster->{'installed_ok'}= "YES"; # Assume install suceeds
$cluster->{'installed_ok'}= 1; # Assume install suceeds
if (ndbcluster_wait_started($cluster, ""))
{
# failed to install, disable usage and flag that its no ok
mtr_report("ndbcluster_install of $cluster->{'name'} failed");
$cluster->{"installed_ok"}= "NO";
$cluster->{"installed_ok"}= 0;
$cluster_started_ok= 0;
}
}
$ENV{'NDB_STATUS_OK'}= $clusters->[0]->{'installed_ok'};
$ENV{'NDB_SLAVE_STATUS_OK'}= $clusters->[1]->{'installed_ok'};;
if ( ! $cluster_started_ok )
{
if ( $opt_force)
......@@ -2372,6 +2375,12 @@ sub install_db ($$) {
mtr_add_arg($args, "--skip-ndbcluster");
mtr_add_arg($args, "--tmpdir=.");
if ( $opt_debug )
{
mtr_add_arg($args, "--debug=d:t:i:A,%s/log/bootstrap_%s.trace",
$opt_vardir_trace, $type);
}
if ( ! $opt_netware )
{
mtr_add_arg($args, "--language=%s", $path_language);
......@@ -2505,7 +2514,7 @@ sub run_testcase_check_skip_test($)
}
# If test needs cluster, check that master installed ok
if ( $tinfo->{'ndb_test'} and $clusters->[0]->{'installed_ok'} eq "NO" )
if ( $tinfo->{'ndb_test'} and !$clusters->[0]->{'installed_ok'} )
{
mtr_report_test_name($tinfo);
mtr_report_test_failed($tinfo);
......@@ -2514,7 +2523,7 @@ sub run_testcase_check_skip_test($)
# If test needs slave cluster, check that it installed ok
if ( $tinfo->{'ndb_test'} and $tinfo->{'slave_num'} and
$clusters->[1]->{'installed_ok'} eq "NO" )
!$clusters->[1]->{'installed_ok'} )
{
mtr_report_test_name($tinfo);
mtr_report_test_failed($tinfo);
......@@ -2879,7 +2888,7 @@ sub mysqld_arguments ($$$$$) {
mtr_add_arg($args, "%s--console", $prefix);
mtr_add_arg($args, "%s--basedir=%s", $prefix, $path_my_basedir);
mtr_add_arg($args, "%s--character-sets-dir=%s", $prefix, $path_charsetsdir);
mtr_add_arg($args, "%s--core", $prefix);
mtr_add_arg($args, "%s--log-bin-trust-function-creators", $prefix);
mtr_add_arg($args, "%s--default-character-set=latin1", $prefix);
mtr_add_arg($args, "%s--language=%s", $prefix, $path_language);
......@@ -2940,8 +2949,6 @@ sub mysqld_arguments ($$$$$) {
mtr_add_arg($args, "%s--datadir=%s", $prefix,
$slave->[$idx]->{'path_myddir'});
# FIXME slave get this option twice?!
mtr_add_arg($args, "%s--exit-info=256", $prefix);
mtr_add_arg($args, "%s--init-rpl-role=slave", $prefix);
if (! $opt_skip_slave_binlog)
{
......@@ -3059,10 +3066,23 @@ sub mysqld_arguments ($$$$$) {
mtr_add_arg($args, "%s--user=root", $prefix);
}
my $found_skip_core= 0;
foreach my $arg ( @opt_extra_mysqld_opt, @$extra_opt )
{
# Allow --skip-core-file to be set in master.opt file
if ($arg eq "--skip-core-file")
{
$found_skip_core= 1;
}
else
{
mtr_add_arg($args, "%s%s", $prefix, $arg);
}
}
if ( !$found_skip_core )
{
mtr_add_arg($args, "%s%s", $prefix, "--core-file");
}
if ( $opt_bench )
{
......@@ -3071,7 +3091,6 @@ sub mysqld_arguments ($$$$$) {
}
elsif ( $type eq 'master' )
{
mtr_add_arg($args, "%s--exit-info=256", $prefix);
mtr_add_arg($args, "%s--open-files-limit=1024", $prefix);
mtr_add_arg($args, "%s--log=%s", $prefix, $master->[0]->{'path_mylog'});
}
......@@ -3103,13 +3122,13 @@ sub mysqld_start ($$$) {
{
$exe= $exe_master_mysqld;
}
if ( $type eq 'slave' )
elsif ( $type eq 'slave' )
{
$exe= $exe_slave_mysqld;
}
else
{
$exe= $exe_mysqld;
mtr_error("Unknown 'type' \"$type\" passed to mysqld_start");
}
mtr_init_args(\$args);
......@@ -3410,7 +3429,6 @@ sub run_testcase_stop_servers($$$) {
my %admin_pids; # hash of admin processes that requests shutdown
my @kill_pids; # list of processes to shutdown/kill
# Remember if we restarted for this test case
$tinfo->{'restarted'}= $do_restart;
......@@ -3720,7 +3738,6 @@ sub run_check_testcase ($$) {
sub run_mysqltest ($) {
my ($tinfo)= @_;
my $exe= $exe_mysqltest;
my $args;
......
......@@ -262,6 +262,13 @@ desc t3;
Field Type Null Key Default Extra
a decimal(21,2) NO 0.00
drop table t1,t2,t3;
select 1e-308, 1.00000001e-300, 100000000e-300;
1e-308 1.00000001e-300 100000000e-300
0 1.00000001e-300 1e-292
select 10e307;
10e307
1e+308
End of 4.1 tests
create table t1 (s1 float(0,2));
ERROR 42000: For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column 's1').
create table t1 (s1 float(1,2));
......
......@@ -6,31 +6,3 @@ use prn;
ERROR 42000: Unknown database 'prn'
create table nu (a int);
drop table nu;
CREATE TABLE `t1` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
CREATE TABLE `t2` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
CREATE TABLE `mt` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MRG_MyISAM DEFAULT CHARSET=latin1 INSERT_METHOD=LAST
UNION=(`t1`,`t2`);
INSERT INTO mt VALUES ('2006-01-01',0);
ALTER TABLE `t2` RENAME TO `t`;
INSERT INTO mt VALUES ('2006-01-01',0);
ERROR HY000: Can't lock file (errno: 155)
select * from mt;
ERROR HY000: Can't lock file (errno: 155)
FLUSH TABLES;
select * from mt;
ERROR HY000: Can't find file: 'mt' (errno: 2)
ALTER TABLE `t` RENAME TO `t2`;
INSERT INTO mt VALUES ('2006-01-01',0);
select * from mt;
TIM VAL
2006-01-01 00:00:00 0
2006-01-01 00:00:00 0
--skip-stack-trace --skip-core-file
......@@ -42,8 +42,5 @@ rpl_multi_engine : BUG#22583 2006-09-23 lars
#rpl_ndb_idempotent : BUG#21298 2006-07-27 msvensson
#rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson
#rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
crash_commit_before : 2006-08-02 msvensson
func_group : BUG#21924 2006-08-30 reggie
func_in : BUG#21925 2006-08-30 reggie
ndb_binlog_discover : bug#21806 2006-08-24
ndb_autodiscover3 : bug#21806
......@@ -3,6 +3,7 @@
# i.e. lower_case_filesystem=OFF
#
-- source include/have_case_sensitive_file_system.inc
-- source include/not_embedded.inc
connect (master,localhost,root,,);
connection master;
......
......@@ -178,7 +178,16 @@ show warnings;
desc t3;
drop table t1,t2,t3;
# End of 4.1 tests
#
# Bug #22129: A small double precision number becomes zero
#
# check if underflows are detected correctly
select 1e-308, 1.00000001e-300, 100000000e-300;
# check if overflows are detected correctly
select 10e307;
--echo End of 4.1 tests
#
# bug #12694 (float(m,d) specifications)
......
......@@ -18,42 +18,3 @@ create table nu (a int);
drop table nu;
# End of 4.1 tests
#
# Bug #20789: Merge Subtable Rename Causes Crash
#
CREATE TABLE `t1` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
CREATE TABLE `t2` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
CREATE TABLE `mt` (
`TIM` datetime NOT NULL,
`VAL` double default NULL
) ENGINE=MRG_MyISAM DEFAULT CHARSET=latin1 INSERT_METHOD=LAST
UNION=(`t1`,`t2`);
# insert into the merge table and thus open it.
INSERT INTO mt VALUES ('2006-01-01',0);
# Alter one of the tables that are part of the merge table
ALTER TABLE `t2` RENAME TO `t`;
# Insert into the merge table that has just been altered
--error 1015
INSERT INTO mt VALUES ('2006-01-01',0);
--error 1015
select * from mt;
FLUSH TABLES;
--error 1017
select * from mt;
# Alter one of the tables that are part of the merge table
ALTER TABLE `t` RENAME TO `t2`;
INSERT INTO mt VALUES ('2006-01-01',0);
select * from mt;
......@@ -27,7 +27,7 @@ INCLUDES = @ZLIB_INCLUDES@ \
WRAPLIBS= @WRAPLIBS@
SUBDIRS = share
libexec_PROGRAMS = mysqld
noinst_PROGRAMS = gen_lex_hash
EXTRA_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
LDADD = $(top_builddir)/vio/libvio.a \
......@@ -157,7 +157,11 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
@echo "If it fails, re-run configure with --with-low-memory"
$(CXXCOMPILE) $(LM_CFLAGS) -c $<
lex_hash.h: gen_lex_hash$(EXEEXT)
# This generates lex_hash.h
# NOTE Built sources should depend on their sources not the tool
# this avoid the rebuild of the built files in a source dist
lex_hash.h: gen_lex_hash.cc lex.h
$(MAKE) $(AM_MAKEFLAGS) gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@
# the following three should eventually be moved out of this directory
......
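The sql/Makefile.am hunk above applies the same dependency idea to lex_hash.h (it now depends on gen_lex_hash.cc and lex.h) and moves gen_lex_hash from noinst_PROGRAMS to EXTRA_PROGRAMS, so the helper is compiled only when the rule needs it. A short sketch of the EXTRA_PROGRAMS side, again with illustrative names:

# automake knows how to build the helper but does not build it by default;
# the rule that produces the generated header requests it explicitly with
# $(MAKE) gen_helper$(EXEEXT), as in the comp_err sketch earlier.
EXTRA_PROGRAMS = gen_helper
gen_helper_SOURCES = gen_helper.cc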
......@@ -1353,7 +1353,7 @@ event_tail:
Lex->sql_command= SQLCOM_CREATE_EVENT;
Lex->expr_allows_subselect= TRUE;
}
;
ev_schedule_time: EVERY_SYM expr interval
{
......
......@@ -76,20 +76,20 @@ btr_pcur_store_position(
{
page_cur_t* page_cursor;
rec_t* rec;
dict_tree_t* tree;
dict_index_t* index;
page_t* page;
ulint offs;
ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED);
ut_ad(cursor->latch_mode != BTR_NO_LATCHES);
tree = btr_cur_get_tree(btr_pcur_get_btr_cur(cursor));
index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));
page_cursor = btr_pcur_get_page_cur(cursor);
rec = page_cur_get_rec(page_cursor);
page = ut_align_down(rec, UNIV_PAGE_SIZE);
offs = ut_align_offset(rec, UNIV_PAGE_SIZE);
page = page_align(rec);
offs = page_offset(rec);
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
MTR_MEMO_PAGE_S_FIX)
......@@ -133,13 +133,13 @@ btr_pcur_store_position(
}
cursor->old_stored = BTR_PCUR_OLD_STORED;
cursor->old_rec = dict_tree_copy_rec_order_prefix
(tree, rec, &cursor->old_n_fields,
cursor->old_rec = dict_index_copy_rec_order_prefix(
index, rec, &cursor->old_n_fields,
&cursor->old_rec_buf, &cursor->buf_size);
cursor->block_when_stored = buf_block_align(page);
cursor->modify_clock = buf_block_get_modify_clock
(cursor->block_when_stored);
cursor->modify_clock = buf_block_get_modify_clock(
cursor->block_when_stored);
}
/******************************************************************
......@@ -197,13 +197,15 @@ btr_pcur_restore_position(
btr_pcur_t* cursor, /* in: detached persistent cursor */
mtr_t* mtr) /* in: mtr */
{
dict_tree_t* tree;
dict_index_t* index;
page_t* page;
dtuple_t* tuple;
ulint mode;
ulint old_mode;
mem_heap_t* heap;
index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));
if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
|| UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
&& cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
......@@ -215,17 +217,16 @@ btr_pcur_restore_position(
ut_error;
}
if (UNIV_UNLIKELY
(cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
if (UNIV_UNLIKELY(
cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
|| cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {
/* In these cases we do not try an optimistic restoration,
but always do a search */
btr_cur_open_at_index_side
(cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
btr_pcur_get_btr_cur(cursor)->index, latch_mode,
btr_pcur_get_btr_cur(cursor), mtr);
btr_cur_open_at_index_side(
cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);
cursor->block_when_stored
= buf_block_align(btr_pcur_get_page(cursor));
......@@ -242,8 +243,8 @@ btr_pcur_restore_position(
|| UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
/* Try optimistic restoration */
if (UNIV_LIKELY
(buf_page_optimistic_get(latch_mode,
if (UNIV_LIKELY(buf_page_optimistic_get(
latch_mode,
cursor->block_when_stored, page,
cursor->modify_clock, mtr))) {
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
......@@ -255,19 +256,17 @@ btr_pcur_restore_position(
rec_t* rec;
ulint* offsets1;
ulint* offsets2;
dict_index_t* index;
#endif /* UNIV_DEBUG */
cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
rec = btr_pcur_get_rec(cursor);
index = btr_pcur_get_btr_cur(cursor)->index;
heap = mem_heap_create(256);
offsets1 = rec_get_offsets
(cursor->old_rec, index, NULL,
offsets1 = rec_get_offsets(
cursor->old_rec, index, NULL,
cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets
(rec, index, NULL,
offsets2 = rec_get_offsets(
rec, index, NULL,
cursor->old_n_fields, &heap);
ut_ad(!cmp_rec_rec(cursor->old_rec,
......@@ -286,8 +285,7 @@ btr_pcur_restore_position(
heap = mem_heap_create(256);
tree = btr_cur_get_tree(btr_pcur_get_btr_cur(cursor));
tuple = dict_tree_build_data_tuple(tree, cursor->old_rec,
tuple = dict_index_build_data_tuple(index, cursor->old_rec,
cursor->old_n_fields, heap);
/* Save the old search mode of the cursor */
......@@ -302,8 +300,8 @@ btr_pcur_restore_position(
mode = PAGE_CUR_L;
}
btr_pcur_open_with_no_init(btr_pcur_get_btr_cur(cursor)->index, tuple,
mode, latch_mode, cursor, 0, mtr);
btr_pcur_open_with_no_init(index, tuple, mode, latch_mode,
cursor, 0, mtr);
/* Restore the old search mode */
cursor->search_mode = old_mode;
......@@ -311,19 +309,18 @@ btr_pcur_restore_position(
if (cursor->rel_pos == BTR_PCUR_ON
&& btr_pcur_is_on_user_rec(cursor, mtr)
&& 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
rec_get_offsets
(btr_pcur_get_rec(cursor),
btr_pcur_get_btr_cur(cursor)->index,
rec_get_offsets(
btr_pcur_get_rec(cursor), index,
NULL, ULINT_UNDEFINED, &heap))) {
/* We have to store the NEW value for the modify clock, since
the cursor can now be on a different page! But we can retain
the value of old_rec */
cursor->block_when_stored = buf_block_align
(btr_pcur_get_page(cursor));
cursor->modify_clock = buf_block_get_modify_clock
(cursor->block_when_stored);
cursor->block_when_stored = buf_block_align(
btr_pcur_get_page(cursor));
cursor->modify_clock = buf_block_get_modify_clock(
cursor->block_when_stored);
cursor->old_stored = BTR_PCUR_OLD_STORED;
mem_heap_free(heap);
......
......@@ -350,8 +350,8 @@ buf_page_is_corrupted(
if (srv_use_checksums) {
old_checksum = buf_calc_page_old_checksum(read_buf);
old_checksum_field = mach_read_from_4
(read_buf + UNIV_PAGE_SIZE
old_checksum_field = mach_read_from_4(
read_buf + UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM);
/* There are 2 valid formulas for old_checksum_field:
......@@ -459,8 +459,8 @@ buf_page_print(
if (dict_sys != NULL) {
index = dict_index_find_on_id_low
(btr_page_get_index_id(read_buf));
index = dict_index_find_on_id_low(
btr_page_get_index_id(read_buf));
if (index) {
fputs("InnoDB: (", stderr);
dict_index_name_print(stderr, NULL, index);
......@@ -598,8 +598,8 @@ buf_pool_init(
/* Allocate the virtual address space window, i.e., the
buffer pool frames */
buf_pool->frame_mem = os_awe_allocate_virtual_mem_window
(UNIV_PAGE_SIZE * (n_frames + 1));
buf_pool->frame_mem = os_awe_allocate_virtual_mem_window(
UNIV_PAGE_SIZE * (n_frames + 1));
/* Allocate the physical memory for AWE and the AWE info array
for buf_pool */
......@@ -625,8 +625,8 @@ buf_pool_init(
}
/*----------------------------------------*/
} else {
buf_pool->frame_mem = os_mem_alloc_large
(UNIV_PAGE_SIZE * (n_frames + 1), TRUE, FALSE);
buf_pool->frame_mem = os_mem_alloc_large(
UNIV_PAGE_SIZE * (n_frames + 1), TRUE, FALSE);
}
if (buf_pool->frame_mem == NULL) {
......@@ -821,8 +821,8 @@ buf_awe_map_page_to_frame(
} else {
/* We can map block to the frame of bck */
os_awe_map_physical_mem_to_window
(bck->frame,
os_awe_map_physical_mem_to_window(
bck->frame,
UNIV_PAGE_SIZE / OS_AWE_X86_PAGE_SIZE,
block->awe_info);
......@@ -840,8 +840,8 @@ buf_awe_map_page_to_frame(
bck);
if (add_to_mapped_list) {
UT_LIST_ADD_FIRST
(awe_LRU_free_mapped,
UT_LIST_ADD_FIRST(
awe_LRU_free_mapped,
buf_pool->awe_LRU_free_mapped,
block);
}
......@@ -1583,7 +1583,7 @@ buf_page_init_for_backup_restore(
block->is_hashed = FALSE;
block->n_fields = 1;
block->n_bytes = 0;
block->side = BTR_SEARCH_LEFT_SIDE;
block->left_side = TRUE;
block->file_page_was_freed = FALSE;
}
......@@ -1650,7 +1650,7 @@ buf_page_init(
block->is_hashed = FALSE;
block->n_fields = 1;
block->n_bytes = 0;
block->side = BTR_SEARCH_LEFT_SIDE;
block->left_side = TRUE;
block->file_page_was_freed = FALSE;
}
......@@ -1710,8 +1710,8 @@ buf_page_init_for_read(
mutex_enter(&(buf_pool->mutex));
if (fil_tablespace_deleted_or_being_deleted_in_mem
(space, tablespace_version)) {
if (fil_tablespace_deleted_or_being_deleted_in_mem(
space, tablespace_version)) {
*err = DB_TABLESPACE_DELETED;
}
......@@ -1891,10 +1891,10 @@ buf_page_io_complete(
/* If this page is not uninitialized and not in the
doublewrite buffer, then the page number and space id
should be the same as in block. */
ulint read_page_no = mach_read_from_4
(block->frame + FIL_PAGE_OFFSET);
ulint read_space_id = mach_read_from_4
(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
ulint read_page_no = mach_read_from_4(
block->frame + FIL_PAGE_OFFSET);
ulint read_space_id = mach_read_from_4(
block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
if (!block->space
&& trx_doublewrite_page_inside(block->offset)) {
......@@ -1980,9 +1980,9 @@ buf_page_io_complete(
}
if (!recv_no_ibuf_operations) {
ibuf_merge_or_delete_for_page
(block->frame, block->space,
block->offset, TRUE);
ibuf_merge_or_delete_for_page(
block->frame, block->space, block->offset,
TRUE);
}
}
......@@ -2110,8 +2110,9 @@ buf_validate(void)
if (block->flush_type == BUF_FLUSH_LRU) {
n_lru_flush++;
ut_a(rw_lock_is_locked
(&block->lock, RW_LOCK_SHARED));
ut_a(rw_lock_is_locked(
&block->lock,
RW_LOCK_SHARED));
} else if (block->flush_type
== BUF_FLUSH_LIST) {
n_list_flush++;
......
......@@ -890,8 +890,8 @@ buf_flush_batch(
old_page_count = page_count;
/* Try to flush also all the neighbors */
page_count += buf_flush_try_neighbors
(space, offset, flush_type);
page_count += buf_flush_try_neighbors(
space, offset, flush_type);
/* fprintf(stderr,
"Flush type %lu, page no %lu, neighb %lu\n",
flush_type, offset,
......
......@@ -542,16 +542,16 @@ buf_LRU_old_adjust_len(void)
if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {
buf_pool->LRU_old = UT_LIST_GET_PREV
(LRU, buf_pool->LRU_old);
buf_pool->LRU_old = UT_LIST_GET_PREV(
LRU, buf_pool->LRU_old);
(buf_pool->LRU_old)->old = TRUE;
buf_pool->LRU_old_len++;
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
(buf_pool->LRU_old)->old = FALSE;
buf_pool->LRU_old = UT_LIST_GET_NEXT
(LRU, buf_pool->LRU_old);
buf_pool->LRU_old = UT_LIST_GET_NEXT(
LRU, buf_pool->LRU_old);
buf_pool->LRU_old_len--;
} else {
ut_a(buf_pool->LRU_old); /* Check that we did not
......
......@@ -257,8 +257,8 @@ buf_read_ahead_random(
mode: hence FALSE as the first parameter */
if (!ibuf_bitmap_page(i)) {
count += buf_read_page_low
(&err, FALSE,
count += buf_read_page_low(
&err, FALSE,
ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
space, tablespace_version, i);
if (err == DB_TABLESPACE_DELETED) {
......@@ -549,8 +549,8 @@ buf_read_ahead_linear(
aio mode: hence FALSE as the first parameter */
if (!ibuf_bitmap_page(i)) {
count += buf_read_page_low
(&err, FALSE,
count += buf_read_page_low(
&err, FALSE,
ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
space, tablespace_version, i);
if (err == DB_TABLESPACE_DELETED) {
......
......@@ -540,8 +540,8 @@ dtuple_convert_big_rec(
n_fields = 0;
while (rec_get_converted_size(index, entry)
>= ut_min(page_get_free_space_of_empty
(dict_table_is_comp(index->table)) / 2,
>= ut_min(page_get_free_space_of_empty(
dict_table_is_comp(index->table)) / 2,
REC_MAX_DATA_SIZE)) {
longest = 0;
......@@ -610,8 +610,8 @@ dtuple_convert_big_rec(
vector->fields[n_fields].len = dfield->len
- DICT_MAX_INDEX_COL_LEN;
vector->fields[n_fields].data = mem_heap_alloc
(heap, vector->fields[n_fields].len);
vector->fields[n_fields].data = mem_heap_alloc(
heap, vector->fields[n_fields].len);
/* Copy data (from the end of field) to big rec vector */
......
......@@ -40,9 +40,6 @@ charset-collation code for them. */
ulint data_mysql_default_charset_coll = 99999999;
dtype_t dtype_binary_val = {DATA_BINARY, 0, 0, 0, 0, 0};
dtype_t* dtype_binary = &dtype_binary_val;
/*************************************************************************
Determine how many bytes the first n characters of the given string occupy.
If the string is shorter than n characters, returns the number of bytes
......@@ -53,7 +50,11 @@ dtype_get_at_most_n_mbchars(
/*========================*/
/* out: length of the prefix,
in bytes */
const dtype_t* dtype, /* in: data type */
ulint prtype, /* in: precise type */
ulint mbminlen, /* in: minimum length of a
multi-byte character */
ulint mbmaxlen, /* in: maximum length of a
multi-byte character */
ulint prefix_len, /* in: length of the requested
prefix, in characters, multiplied by
dtype_get_mbmaxlen(dtype) */
......@@ -63,12 +64,12 @@ dtype_get_at_most_n_mbchars(
{
#ifndef UNIV_HOTBACKUP
ut_a(data_len != UNIV_SQL_NULL);
ut_ad(!dtype->mbmaxlen || !(prefix_len % dtype->mbmaxlen));
ut_ad(!mbmaxlen || !(prefix_len % mbmaxlen));
if (dtype->mbminlen != dtype->mbmaxlen) {
ut_a(!(prefix_len % dtype->mbmaxlen));
return(innobase_get_at_most_n_mbchars
(dtype_get_charset_coll(dtype->prtype),
if (mbminlen != mbmaxlen) {
ut_a(!(prefix_len % mbmaxlen));
return(innobase_get_at_most_n_mbchars(
dtype_get_charset_coll(prtype),
prefix_len, data_len, str));
}
......@@ -270,8 +271,6 @@ dtype_print(
} else if (prtype == DATA_TRX_ID) {
fputs("DATA_TRX_ID", stderr);
len = DATA_TRX_ID_LEN;
} else if (prtype == DATA_MIX_ID) {
fputs("DATA_MIX_ID", stderr);
} else if (prtype == DATA_ENGLISH) {
fputs("DATA_ENGLISH", stderr);
} else {
......@@ -291,38 +290,5 @@ dtype_print(
}
}
fprintf(stderr, " len %lu prec %lu", (ulong) len, (ulong) type->prec);
}
/***************************************************************************
Returns the maximum size of a data type. Note: types in system tables may be
incomplete and return incorrect information. */
ulint
dtype_get_max_size(
/*===============*/
/* out: maximum size (ULINT_MAX for
unbounded types) */
const dtype_t* type) /* in: type */
{
switch (type->mtype) {
case DATA_SYS:
case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_INT:
case DATA_FLOAT:
case DATA_DOUBLE:
case DATA_MYSQL:
case DATA_VARCHAR:
case DATA_BINARY:
case DATA_DECIMAL:
case DATA_VARMYSQL:
return(type->len);
case DATA_BLOB:
return(ULINT_MAX);
default:
ut_error;
}
return(ULINT_MAX);
fprintf(stderr, " len %lu", (ulong) len);
}
......@@ -58,8 +58,7 @@ dict_hdr_get_new_id(
dulint id;
mtr_t mtr;
ut_ad((type == DICT_HDR_TABLE_ID) || (type == DICT_HDR_INDEX_ID)
|| (type == DICT_HDR_MIX_ID));
ut_ad((type == DICT_HDR_TABLE_ID) || (type == DICT_HDR_INDEX_ID));
mtr_start(&mtr);
......@@ -141,6 +140,7 @@ dict_hdr_create(
mlog_write_dulint(dict_header + DICT_HDR_INDEX_ID,
ut_dulint_create(0, DICT_HDR_FIRST_ID), mtr);
/* Obsolete, but we must initialize it to 0 anyway. */
mlog_write_dulint(dict_header + DICT_HDR_MIX_ID,
ut_dulint_create(0, DICT_HDR_FIRST_ID), mtr);
......@@ -214,7 +214,6 @@ dict_boot(void)
dict_index_t* index;
dict_hdr_t* dict_hdr;
mtr_t mtr;
ibool success;
mtr_start(&mtr);
......@@ -236,9 +235,9 @@ dict_boot(void)
..._MARGIN, it will immediately be updated to the disk-based
header. */
dict_sys->row_id = ut_dulint_add
(ut_dulint_align_up(mtr_read_dulint
(dict_hdr + DICT_HDR_ROW_ID, &mtr),
dict_sys->row_id = ut_dulint_add(
ut_dulint_align_up(mtr_read_dulint(dict_hdr + DICT_HDR_ROW_ID,
&mtr),
DICT_HDR_ROW_ID_WRITE_MARGIN),
DICT_HDR_ROW_ID_WRITE_MARGIN);
......@@ -247,14 +246,14 @@ dict_boot(void)
/*-------------------------*/
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4);
table->id = DICT_TABLES_ID;
......@@ -269,31 +268,30 @@ dict_boot(void)
index->id = DICT_TABLES_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_TABLES,
dict_index_add_to_cache(table, index,
mtr_read_ulint(dict_hdr + DICT_HDR_TABLES,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
index = dict_mem_index_create("SYS_TABLES", "ID_IND",
DICT_HDR_SPACE, DICT_UNIQUE, 1);
dict_mem_index_add_field(index, "ID", 0);
index->id = DICT_TABLE_IDS_ID;
success = dict_index_add_to_cache(table, index,
mtr_read_ulint
(dict_hdr + DICT_HDR_TABLE_IDS,
dict_index_add_to_cache(table, index,
mtr_read_ulint(dict_hdr + DICT_HDR_TABLE_IDS,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY,0,0,0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4);
table->id = DICT_COLUMNS_ID;
......@@ -308,20 +306,20 @@ dict_boot(void)
dict_mem_index_add_field(index, "POS", 0);
index->id = DICT_COLUMNS_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_COLUMNS,
dict_index_add_to_cache(table, index,
mtr_read_ulint(dict_hdr + DICT_HDR_COLUMNS,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0,0,0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4);
/* The '+ 2' below comes from the 2 system fields */
#if DICT_SYS_INDEXES_PAGE_NO_FIELD != 6 + 2
......@@ -346,16 +344,16 @@ dict_boot(void)
dict_mem_index_add_field(index, "ID", 0);
index->id = DICT_INDEXES_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_INDEXES,
dict_index_add_to_cache(table, index,
mtr_read_ulint(dict_hdr + DICT_HDR_INDEXES,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0);
dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0,0,0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0,0,0);
dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0, 0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4);
dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0, 0);
table->id = DICT_FIELDS_ID;
dict_table_add_to_cache(table);
......@@ -369,10 +367,9 @@ dict_boot(void)
dict_mem_index_add_field(index, "POS", 0);
index->id = DICT_FIELDS_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_FIELDS,
dict_index_add_to_cache(table, index,
mtr_read_ulint(dict_hdr + DICT_HDR_FIELDS,
MLOG_4BYTES, &mtr));
ut_a(success);
mtr_commit(&mtr);
/*-------------------------*/
......
......@@ -78,14 +78,14 @@ dict_create_sys_tables_tuple(
mach_write_to_4(ptr, DICT_TABLE_ORDINARY);
dfield_set_data(dfield, ptr, 4);
/* 6: MIX_ID ---------------------------*/
/* 6: MIX_ID (obsolete) ---------------------------*/
dfield = dtuple_get_nth_field(entry, 4);
ptr = mem_heap_alloc(heap, 8);
memset(ptr, 0, 8);
dfield_set_data(dfield, ptr, 8);
/* 7: MIX_LEN --------------------------*/
/* 7: MIX_LEN (obsolete) --------------------------*/
dfield = dtuple_get_nth_field(entry, 5);
......@@ -126,9 +126,10 @@ dict_create_sys_columns_tuple(
{
dict_table_t* sys_columns;
dtuple_t* entry;
dict_col_t* column;
const dict_col_t* column;
dfield_t* dfield;
byte* ptr;
const char* col_name;
ut_ad(table && heap);
......@@ -155,33 +156,34 @@ dict_create_sys_columns_tuple(
/* 4: NAME ---------------------------*/
dfield = dtuple_get_nth_field(entry, 2);
dfield_set_data(dfield, column->name, ut_strlen(column->name));
col_name = dict_table_get_col_name(table, i);
dfield_set_data(dfield, col_name, ut_strlen(col_name));
/* 5: MTYPE --------------------------*/
dfield = dtuple_get_nth_field(entry, 3);
ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, (column->type).mtype);
mach_write_to_4(ptr, column->mtype);
dfield_set_data(dfield, ptr, 4);
/* 6: PRTYPE -------------------------*/
dfield = dtuple_get_nth_field(entry, 4);
ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, (column->type).prtype);
mach_write_to_4(ptr, column->prtype);
dfield_set_data(dfield, ptr, 4);
/* 7: LEN ----------------------------*/
dfield = dtuple_get_nth_field(entry, 5);
ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, (column->type).len);
mach_write_to_4(ptr, column->len);
dfield_set_data(dfield, ptr, 4);
/* 8: PREC ---------------------------*/
dfield = dtuple_get_nth_field(entry, 6);
ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, (column->type).prec);
mach_write_to_4(ptr, 0/* unused */);
dfield_set_data(dfield, ptr, 4);
/*---------------------------------*/
......@@ -222,8 +224,7 @@ dict_build_table_def_step(
row_len = 0;
for (i = 0; i < table->n_def; i++) {
row_len += dtype_get_min_size(dict_col_get_type
(&table->cols[i]));
row_len += dict_col_get_min_size(&table->cols[i]);
}
if (row_len > BTR_PAGE_MAX_REC_SIZE) {
return(DB_TOO_BIG_RECORD);
......@@ -238,7 +239,7 @@ dict_build_table_def_step(
- page 3 will contain the root of the clustered index of the
table we create here. */
table->space = 0; /* reset to zero for the call below */
ulint space = 0; /* reset to zero for the call below */
if (table->dir_path_of_temp_table) {
/* We place tables created with CREATE TEMPORARY
......@@ -251,9 +252,11 @@ dict_build_table_def_step(
is_path = FALSE;
}
error = fil_create_new_single_table_tablespace
(&table->space, path_or_name, is_path,
error = fil_create_new_single_table_tablespace(
&space, path_or_name, is_path,
FIL_IBD_FILE_INITIAL_SIZE);
table->space = space;
if (error != DB_SUCCESS) {
return(error);
......@@ -768,8 +771,8 @@ dict_truncate_index_tree(
appropriate field in the SYS_INDEXES record: this mini-transaction
marks the B-tree totally truncated */
comp = page_is_comp(btr_page_get
(space, root_page_no, RW_X_LATCH, mtr));
comp = page_is_comp(btr_page_get(space, root_page_no, RW_X_LATCH,
mtr));
btr_free_root(space, root_page_no, mtr);
/* We will temporarily write FIL_NULL to the PAGE_NO field
......@@ -798,7 +801,7 @@ dict_truncate_index_tree(
root_page_no = btr_create(type, space, index_id, comp, mtr);
if (index) {
index->tree->page = root_page_no;
index->page = root_page_no;
} else {
ut_print_timestamp(stderr);
fprintf(stderr,
......@@ -1004,7 +1007,6 @@ dict_create_index_step(
que_thr_t* thr) /* in: query thread */
{
ind_node_t* node;
ibool success;
ulint err = DB_ERROR;
trx_t* trx;
......@@ -1088,11 +1090,9 @@ dict_create_index_step(
if (node->state == INDEX_ADD_TO_CACHE) {
success = dict_index_add_to_cache(node->table, node->index,
dict_index_add_to_cache(node->table, node->index,
node->page_no);
ut_a(success);
err = DB_SUCCESS;
}
......@@ -1328,8 +1328,9 @@ dict_create_add_foreign_field_to_dictionary(
pars_info_add_str_literal(info, "ref_col_name",
foreign->referenced_col_names[field_nr]);
return(dict_foreign_eval_sql
(info, "PROCEDURE P () IS\n"
return(dict_foreign_eval_sql(
info,
"PROCEDURE P () IS\n"
"BEGIN\n"
"INSERT INTO SYS_FOREIGN_COLS VALUES"
"(:id, :pos, :for_col_name, :ref_col_name);\n"
......@@ -1393,8 +1394,8 @@ dict_create_add_foreign_to_dictionary(
}
for (i = 0; i < foreign->n_fields; i++) {
error = dict_create_add_foreign_field_to_dictionary
(i, table, foreign, trx);
error = dict_create_add_foreign_field_to_dictionary(
i, table, foreign, trx);
if (error != DB_SUCCESS) {
......@@ -1450,8 +1451,8 @@ dict_create_add_foreigns_to_dictionary(
foreign;
foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
error = dict_create_add_foreign_to_dictionary
(&number, table, foreign, trx);
error = dict_create_add_foreign_to_dictionary(&number, table,
foreign, trx);
if (error != DB_SUCCESS) {
......
......@@ -51,8 +51,8 @@ if_step(
for (;;) {
eval_exp(elsif_node->cond);
if (eval_node_get_ibool_val
(elsif_node->cond)) {
if (eval_node_get_ibool_val(
elsif_node->cond)) {
/* The condition evaluated to TRUE:
start execution from the first
......
......@@ -201,8 +201,6 @@ class ha_innobase: public handler
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
void build_template(struct row_prebuilt_struct *prebuilt, THD *thd,
TABLE *table, uint templ_type);
};
extern SHOW_VAR innodb_status_variables[];
......
......@@ -204,7 +204,7 @@ btr_node_ptr_get_child_page_no(
fprintf(stderr,
"InnoDB: a nonsensical page number 0"
" in a node ptr record at offset %lu\n",
(ulong) ut_align_offset(rec, UNIV_PAGE_SIZE));
(ulong) page_offset(rec));
buf_page_print(buf_frame_align(rec));
}
......