Commit d3467c0b
authored May 04, 2006 by jani@hundin.mysql.fi

Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-4.1
into hundin.mysql.fi:/home/jani/mysql-4.1

parents 5e102db3 a5f440f8
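
Note: the most visible functional change in this merge is the fix for BUG#19025
("mysqldump doesn't correctly dump auto_increment = [int]"): SHOW CREATE TABLE, and
therefore mysqldump output, now carry the table's next AUTO_INCREMENT value. A minimal
sketch of the behaviour the new auto_increment and mysqldump tests below verify (the
table name and values here are illustrative, not taken verbatim from the patch):

  CREATE TABLE demo (
    id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
    PRIMARY KEY (id)
  ) ENGINE=MyISAM AUTO_INCREMENT=1000;
  INSERT INTO demo VALUES (NULL), (NULL), (NULL);
  SHOW CREATE TABLE demo;
  # ... now ends with: ) ENGINE=MyISAM AUTO_INCREMENT=1003 DEFAULT CHARSET=latin1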
Showing 23 changed files with 255 additions and 54 deletions (+255 -54)
innobase/include/dict0dict.ic                  +0  -2
mysql-test/mysql-test-run.pl                   +1  -0
mysql-test/mysql-test-run.sh                   +1  -0
mysql-test/r/analyze.result                    +9  -0
mysql-test/r/auto_increment.result             +24 -0
mysql-test/r/gis-rtree.result                  +1  -1
mysql-test/r/mysqldump.result                  +38 -0
mysql-test/r/mysqltest.result                  +2  -2
mysql-test/r/ndb_blob.result                   +13 -5
mysql-test/r/symlink.result                    +2  -2
mysql-test/t/analyze.test                      +9  -0
mysql-test/t/auto_increment.test               +20 -1
mysql-test/t/mysql_client_test.test            +3  -0
mysql-test/t/mysqldump.test                    +31 -1
mysql-test/t/mysqltest.test                    +1  -0
mysql-test/t/ndb_blob.test                     +22 -3
ndb/include/kernel/signaldata/TcKeyReq.hpp     +1  -0
ndb/include/ndbapi/NdbBlob.hpp                 +1  -0
ndb/src/ndbapi/NdbBlob.cpp                     +23 -1
ndb/test/ndbapi/testBlobs.cpp                  +9  -14
ndb/tools/delete_all.cpp                       +14 -17
sql/sql_show.cc                                +18 -0
sql/sql_table.cc                               +12 -5
innobase/include/dict0dict.ic
...
...
@@ -93,7 +93,6 @@ dict_table_get_n_user_cols(
{
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(table->cached);
return(table->n_cols - DATA_N_SYS_COLS);
}
...
...
@@ -127,7 +126,6 @@ dict_table_get_n_cols(
{
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(table->cached);
return(table->n_cols);
}
...
...
mysql-test/mysql-test-run.pl
...
...
@@ -678,6 +678,7 @@ sub command_line_setup () {
    $glob_use_embedded_server= 1;
    push(@glob_test_mode, "embedded");
    $opt_skip_rpl= 1;    # We never run replication with embedded
    $opt_skip_ndbcluster= 1;
  if ( $opt_extern )
  {
...
...
mysql-test/mysql-test-run.sh
...
...
@@ -279,6 +279,7 @@ while test $# -gt 0; do
      USE_EMBEDDED_SERVER=1
      USE_MANAGER=0 NO_SLAVE=1
      USE_RUNNING_SERVER=""
      USE_NDBCLUSTER=""
      TEST_MODE="$TEST_MODE embedded" ;;
    --purify)
      USE_PURIFY=1
...
...
mysql-test/r/analyze.result
...
...
@@ -37,3 +37,12 @@ Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_
execute stmt1;
Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype
deallocate prepare stmt1;
create temporary table t1(a int, index(a));
insert into t1 values('1'),('2'),('3'),('4'),('5');
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A 5 NULL NULL YES BTREE
drop table t1;
mysql-test/r/auto_increment.result
...
...
@@ -355,3 +355,27 @@ CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE IF EXISTS t1;
CREATE TABLE `t1` (
t1_name VARCHAR(255) DEFAULT NULL,
t1_id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
KEY (t1_name),
PRIMARY KEY (t1_id)
) AUTO_INCREMENT = 1000;
INSERT INTO t1 (t1_name) VALUES('MySQL');
INSERT INTO t1 (t1_name) VALUES('MySQL');
INSERT INTO t1 (t1_name) VALUES('MySQL');
SELECT * from t1;
t1_name t1_id
MySQL 1000
MySQL 1001
MySQL 1002
SHOW CREATE TABLE `t1`;
Table Create Table
t1 CREATE TABLE `t1` (
`t1_name` varchar(255) default NULL,
`t1_id` int(10) unsigned NOT NULL auto_increment,
PRIMARY KEY (`t1_id`),
KEY `t1_name` (`t1_name`)
) ENGINE=MyISAM AUTO_INCREMENT=1003 DEFAULT CHARSET=latin1
DROP TABLE `t1`;
End of 4.1 tests
mysql-test/r/gis-rtree.result
...
...
@@ -294,7 +294,7 @@ t2 CREATE TABLE `t2` (
`g` geometry NOT NULL default '',
PRIMARY KEY (`fid`),
SPATIAL KEY `g` (`g`(32))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
) ENGINE=MyISAM AUTO_INCREMENT=101 DEFAULT CHARSET=latin1
SELECT count(*) FROM t2;
count(*)
100
...
...
mysql-test/r/mysqldump.result
...
...
@@ -1503,3 +1503,41 @@ select * from t1;
a b
Osnabrck Kln
drop table t1;
create table `t1` (
t1_name varchar(255) default null,
t1_id int(10) unsigned not null auto_increment,
key (t1_name),
primary key (t1_id)
) auto_increment = 1000 default charset=latin1;
insert into t1 (t1_name) values('bla');
insert into t1 (t1_name) values('bla');
insert into t1 (t1_name) values('bla');
select * from t1;
t1_name t1_id
bla 1000
bla 1001
bla 1002
show create table `t1`;
Table Create Table
t1 CREATE TABLE `t1` (
`t1_name` varchar(255) default NULL,
`t1_id` int(10) unsigned NOT NULL auto_increment,
PRIMARY KEY (`t1_id`),
KEY `t1_name` (`t1_name`)
) ENGINE=MyISAM AUTO_INCREMENT=1003 DEFAULT CHARSET=latin1
DROP TABLE `t1`;
select * from t1;
t1_name t1_id
bla 1000
bla 1001
bla 1002
show create table `t1`;
Table Create Table
t1 CREATE TABLE `t1` (
`t1_name` varchar(255) default NULL,
`t1_id` int(10) unsigned NOT NULL auto_increment,
PRIMARY KEY (`t1_id`),
KEY `t1_name` (`t1_name`)
) ENGINE=MyISAM AUTO_INCREMENT=1003 DEFAULT CHARSET=latin1
drop table `t1`;
End of 4.1 tests
mysql-test/r/mysqltest.result
select -1 as "before_use_test" ;
select 0 as "before_use_test" ;
before_use_test
-1
0
select otto from (select 1 as otto) as t1;
otto
1
...
...
mysql-test/r/ndb_blob.result
...
...
@@ -481,14 +481,22 @@ msg text NOT NULL
insert into t1 (msg) values(
'Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
so bad data will not crash kernel.
Proper fix: Set inline bytes to multiple of mbmaxlen and
validate it (after the 8 byte length).');
so bad data will not crash kernel.');
select * from t1;
id msg
1 Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
so bad data will not crash kernel.
Proper fix: Set inline bytes to multiple of mbmaxlen and
validate it (after the 8 byte length).
drop table t1;
create table t1 (
a int primary key not null auto_increment,
b text
) engine=ndbcluster;
select count(*) from t1;
count(*)
500
truncate t1;
select count(*) from t1;
count(*)
0
drop table t1;
mysql-test/r/symlink.result
...
...
@@ -40,7 +40,7 @@ t9 CREATE TABLE `t9` (
`b` char(16) NOT NULL default '',
`c` int(11) NOT NULL default '0',
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/'
) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/'
alter table t9 rename t8, add column d int not null;
alter table t8 rename t7;
rename table t7 to t9;
...
...
@@ -62,7 +62,7 @@ t9 CREATE TABLE `t9` (
`c` int(11) NOT NULL default '0',
`d` int(11) NOT NULL default '0',
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/'
) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/'
drop database mysqltest;
create table t1 (a int not null) engine=myisam;
show create table t1;
...
...
mysql-test/t/analyze.test
...
...
@@ -48,4 +48,13 @@ execute stmt1;
execute stmt1;
deallocate prepare stmt1;

#
# bug#15225 (ANALYZE temporary has no effect)
#
create temporary table t1(a int, index(a));
insert into t1 values('1'),('2'),('3'),('4'),('5');
analyze table t1;
show index from t1;
drop table t1;

# End of 4.1 tests
mysql-test/t/auto_increment.test
...
...
@@ -219,4 +219,23 @@ INSERT INTO t1 (b) VALUES ('bbbb');
CHECK TABLE t1;
DROP TABLE IF EXISTS t1;

# End of 4.1 tests

# BUG #19025:
CREATE TABLE `t1` (
  t1_name VARCHAR(255) DEFAULT NULL,
  t1_id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
  KEY (t1_name),
  PRIMARY KEY (t1_id)
) AUTO_INCREMENT = 1000;

INSERT INTO t1 (t1_name) VALUES('MySQL');
INSERT INTO t1 (t1_name) VALUES('MySQL');
INSERT INTO t1 (t1_name) VALUES('MySQL');

SELECT * from t1;

SHOW CREATE TABLE `t1`;

DROP TABLE `t1`;

--echo End of 4.1 tests
mysql-test/t/mysql_client_test.test
# This test should work in embedded server after we fix mysqltest
--source include/not_embedded.inc
# We run with different binaries for normal and --embedded-server
#
# If this test fails with "command "$MYSQL_CLIENT_TEST" failed",
...
...
mysql-test/t/mysqldump.test
...
...
@@ -647,4 +647,34 @@ select * from t1;
select * from t1;
drop table t1;

# End of 4.1 tests

#
# BUG #19025 mysqldump doesn't correctly dump "auto_increment = [int]"
#

create table `t1` (
  t1_name varchar(255) default null,
  t1_id int(10) unsigned not null auto_increment,
  key (t1_name),
  primary key (t1_id)
) auto_increment = 1000 default charset=latin1;

insert into t1 (t1_name) values('bla');
insert into t1 (t1_name) values('bla');
insert into t1 (t1_name) values('bla');

select * from t1;

show create table `t1`;

--exec $MYSQL_DUMP --skip-comments test t1 > $MYSQLTEST_VARDIR/tmp/bug19025.sql

DROP TABLE `t1`;

--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/bug19025.sql

select * from t1;

show create table `t1`;

drop table `t1`;

--echo End of 4.1 tests
mysql-test/t/mysqltest.test
--source include/not_embedded.inc
# ============================================================================
#
...
...
mysql-test/t/ndb_blob.test
...
...
@@ -403,10 +403,29 @@ create table t1 (
insert into t1 (msg) values(
'Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text. It is not yet indexable
so bad data will not crash kernel.
Proper fix: Set inline bytes to multiple of mbmaxlen and
validate it (after the 8 byte length).');
so bad data will not crash kernel.');
select * from t1;
drop table t1;

# -- bug #19201
create table t1 (
  a int primary key not null auto_increment,
  b text
) engine=ndbcluster;

--disable_query_log
set autocommit=1;
# more rows than batch size (64)
# for this bug no blob parts would be necessary
let $1 = 500;
while ($1)
{
  insert into t1 (b) values (repeat('x',4000));
  dec $1;
}
--enable_query_log

select count(*) from t1;
truncate t1;
select count(*) from t1;
drop table t1;

# End of 4.1 tests
ndb/include/kernel/signaldata/TcKeyReq.hpp
...
...
@@ -39,6 +39,7 @@ class TcKeyReq {
  friend class NdbOperation;
  friend class NdbIndexOperation;
  friend class NdbScanOperation;
  friend class NdbBlob;
  friend class DbUtil;

  /**
...
...
ndb/include/ndbapi/NdbBlob.hpp
...
...
@@ -275,6 +275,7 @@ private:
  bool isWriteOp();
  bool isDeleteOp();
  bool isScanOp();
  bool isTakeOverOp();
  // computations
  Uint32 getPartNumber(Uint64 pos);
  Uint32 getPartCount();
...
...
ndb/src/ndbapi/NdbBlob.cpp
...
...
@@ -23,6 +23,7 @@
#include <NdbBlob.hpp>
#include "NdbBlobImpl.hpp"
#include <NdbScanOperation.hpp>
#include <signaldata/TcKeyReq.hpp>
#ifdef NDB_BLOB_DEBUG
#define DBG(x) \
...
...
@@ -290,6 +291,13 @@ NdbBlob::isScanOp()
    theNdbOp->theOperationType == NdbOperation::OpenRangeScanRequest;
}

inline bool
NdbBlob::isTakeOverOp()
{
  return
    TcKeyReq::getTakeOverScanFlag(theNdbOp->theScanInfo);
}

// computations (inline)

inline Uint32
...
...
@@ -1218,8 +1226,22 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
  if (isUpdateOp() || isWriteOp() || isDeleteOp()) {
    // add operation before this one to read head+inline
    NdbOperation* tOp = theNdbCon->getNdbOperation(theTable, theNdbOp);
    /*
     * If main op is from take over scan lock, the added read is done
     * as committed read:
     *
     * In normal transactional case, the row is locked by us and
     * committed read returns same as normal read.
     *
     * In current TRUNCATE TABLE, the deleting trans is committed in
     * batches and then restarted with new trans id. A normal read
     * would hang on the scan delete lock and then fail.
     */
    NdbOperation::LockMode lockMode =
      ! isTakeOverOp() ?
        NdbOperation::LM_Read : NdbOperation::LM_CommittedRead;
    if (tOp == NULL ||
        tOp->readTuple() == -1 ||
        tOp->readTuple(lockMode) == -1 ||
        setTableKeyValue(tOp) == -1 ||
        getHeadInlineValue(tOp) == -1) {
      setErrorCode(tOp);
...
...
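Note: the committed-read lock mode added above is what the new ndb_blob test (bug #19201)
exercises. TRUNCATE on an NDB table with a blob column runs as a batched scan-delete that
commits and restarts its transaction, so the extra head+inline read must use LM_CommittedRead;
per the comment above, a normal read would hang on the scan's delete lock and then fail.
Roughly the scenario reproduced by the test (row count and value size follow the test file):

  create table t1 (a int primary key not null auto_increment, b text) engine=ndbcluster;
  # insert more rows than the delete batch size, e.g. 500 rows of repeat('x',4000)
  truncate t1;
  select count(*) from t1;   # expected to return 0 instead of hanging/failing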
ndb/test/ndbapi/testBlobs.cpp
...
...
@@ -45,6 +45,7 @@ struct Opt {
  bool m_dbg;
  bool m_dbgall;
  const char* m_dbug;
  bool m_fac;
  bool m_full;
  unsigned m_loop;
  unsigned m_parts;
...
...
@@ -73,6 +74,7 @@ struct Opt {
    m_dbg(false),
    m_dbgall(false),
    m_dbug(0),
    m_fac(false),
    m_full(false),
    m_loop(1),
    m_parts(10),
...
...
@@ -111,6 +113,7 @@ printusage()
    << " -dbg print debug" << endl
    << " -dbgall print also NDB API debug (if compiled in)" << endl
    << " -dbug opt dbug options" << endl
    << " -fac fetch across commit in scan delete [" << d.m_fac << "]" << endl
    << " -full read/write only full blob values" << endl
    << " -loop N loop N times 0=forever [" << d.m_loop << "]" << endl
    << " -parts N max parts in blob value [" << d.m_parts << "]" << endl
...
...
@@ -1260,23 +1263,11 @@ deleteScan(bool idx)
    CHK((ret = rs->nextResult(false)) == 0 || ret == 1 || ret == 2);
    if (++n == g_opt.m_batch || ret == 2) {
      DBG("execute batch: n=" << n << " ret=" << ret);
      switch (0) {
      case 0: // works normally
      if (! g_opt.m_fac) {
        CHK(g_con->execute(NoCommit) == 0);
        CHK(true || g_con->restart() == 0);
        break;
      case 1: // nonsense - g_con is invalid for 2nd batch
        CHK(g_con->execute(Commit) == 0);
        CHK(true || g_con->restart() == 0);
        break;
      case 2: // DBTC sendSignalErrorRefuseLab
        CHK(g_con->execute(NoCommit) == 0);
        CHK(g_con->restart() == 0);
        break;
      case 3: // 266 time-out
      } else {
        CHK(g_con->execute(Commit) == 0);
        CHK(g_con->restart() == 0);
        break;
      }
      n = 0;
    }
...
...
@@ -1824,6 +1815,10 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
        continue;
      }
    }
    if (strcmp(arg, "-fac") == 0) {
      g_opt.m_fac = true;
      continue;
    }
    if (strcmp(arg, "-full") == 0) {
      g_opt.m_full = true;
      continue;
...
...
ndb/tools/delete_all.cpp
...
...
@@ -23,17 +23,21 @@
#include <NDBT.hpp>

static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
                       bool commit_across_open_cursor, int parallelism=240);
                       bool fetch_across_commit, int parallelism=240);

NDB_STD_OPTS_VARS;

static const char* _dbname = "TEST_DB";
static my_bool _transactional = false;

static struct my_option my_long_options[] =
{
  NDB_STD_OPTS("ndb_desc"),
  { "database", 'd', "Name of database table is in",
    (gptr*) &_dbname, (gptr*) &_dbname, 0,
    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
  { "transactional", 't', "Single transaction (may run out of operations)",
    (gptr*) &_transactional, (gptr*) &_transactional, 0,
    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
  { 0, 0, 0, 0, 0, 0,
    GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
static void usage()
...
...
@@ -82,18 +86,11 @@ int main(int argc, char** argv){
      ndbout << " Table " << argv[i] << " does not exist!" << endl;
      return NDBT_ProgramExit(NDBT_WRONGARGS);
    }
    // Check if we have any blobs
    bool commit_across_open_cursor = true;
    for (int j = 0; j < pTab->getNoOfColumns(); j++) {
      NdbDictionary::Column::Type t = pTab->getColumn(j)->getType();
      if (t == NdbDictionary::Column::Blob ||
          t == NdbDictionary::Column::Text) {
        commit_across_open_cursor = false;
        break;
      }
    }
    ndbout << "Deleting all from " << argv[i] << "...";
    if(clear_table(&MyNdb, pTab, commit_across_open_cursor) == NDBT_FAILED){
    ndbout << "Deleting all from " << argv[i];
    if (! _transactional)
      ndbout << " (non-transactional)";
    ndbout << " ...";
    if(clear_table(&MyNdb, pTab, ! _transactional) == NDBT_FAILED){
      res = NDBT_FAILED;
      ndbout << "FAILED" << endl;
    }
...
...
@@ -103,7 +100,7 @@ int main(int argc, char** argv){
int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
                bool commit_across_open_cursor, int parallelism)
                bool fetch_across_commit, int parallelism)
{
  // Scan all records exclusive and delete
  // them one by one
...
...
@@ -165,7 +162,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
    } while((check = rs->nextResult(false)) == 0);
    if(check != -1){
      if (commit_across_open_cursor) {
      if (fetch_across_commit) {
        check = pTrans->execute(Commit);
        pTrans->restart(); // new tx id
      } else {
...
...
@@ -196,7 +193,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
      }
      goto failed;
    }
    if (! commit_across_open_cursor && pTrans->execute(Commit) != 0) {
    if (! fetch_across_commit && pTrans->execute(Commit) != 0) {
      err = pTrans->getNdbError();
      goto failed;
    }
...
...
sql/sql_show.cc
...
...
@@ -1479,6 +1479,24 @@ store_create_info(THD *thd, TABLE *table, String *packet)
  packet->append(" ENGINE=", 8);
  packet->append(file->table_type());

  /*
    Add AUTO_INCREMENT=... if there is an AUTO_INCREMENT column,
    and NEXT_ID > 1 (the default). We must not print the clause
    for engines that do not support this as it would break the
    import of dumps, but as of this writing, the test for whether
    AUTO_INCREMENT columns are allowed and whether AUTO_INCREMENT=...
    is supported is identical, !(file->table_flags() & HA_NO_AUTO_INCREMENT))
    Because of that, we do not explicitly test for the feature,
    but may extrapolate its existence from that of an AUTO_INCREMENT column.
  */
  if (create_info.auto_increment_value > 1)
  {
    packet->append(" AUTO_INCREMENT=", 16);
    end = longlong10_to_str(create_info.auto_increment_value, buff, 10);
    packet->append(buff, (uint) (end - buff));
  }

  if (table->table_charset &&
      !(thd->variables.sql_mode & MODE_MYSQL323) &&
      !(thd->variables.sql_mode & MODE_MYSQL40))
...
...
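Note: because the clause above is only appended when create_info.auto_increment_value > 1,
a freshly created table whose counter is still at the default should print exactly as before;
only tables whose next value has advanced (as in the updated gis-rtree and symlink results)
gain the AUTO_INCREMENT=N clause. A small sketch of that guard, assuming MyISAM defaults:

  CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id)) ENGINE=MyISAM;
  SHOW CREATE TABLE t;   # no AUTO_INCREMENT clause yet (next value is 1)
  INSERT INTO t VALUES (NULL);
  SHOW CREATE TABLE t;   # now ends with ... AUTO_INCREMENT=2 DEFAULT CHARSET=latin1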
sql/sql_table.cc
...
...
@@ -1706,7 +1706,9 @@ mysql_rename_table(enum db_type base,
    }
  }
  delete file;
  if (error)
  if (error == HA_ERR_WRONG_COMMAND)
    my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
  else if (error)
    my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);
  DBUG_RETURN(error != 0);
}
...
...
@@ -2192,11 +2194,16 @@ send_result_message:
    if (fatal_error)
      table->table->version=0;               // Force close of table
    else if (open_for_modify)
    {
      if (table->table->tmp_table)
        table->table->file->info(HA_STATUS_CONST);
      else
      {
        pthread_mutex_lock(&LOCK_open);
        remove_table_from_cache(thd, table->table->table_cache_key,
                                table->table->real_name, RTFC_NO_FLAG);
        pthread_mutex_unlock(&LOCK_open);
      }
      /* May be something modified consequently we have to invalidate cache */
      query_cache_invalidate3(thd, table->table, 0);
    }
...
...
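Note: the tmp_table branch added above appears to be the fix exercised by the new analyze
test (bug#15225, "ANALYZE temporary has no effect"): for a temporary table the handler
statistics are refreshed with info(HA_STATUS_CONST) instead of evicting the table from the
table cache, so the cardinality computed by ANALYZE becomes visible. Sketch of the check
the test performs (expected output per the new analyze.result):

  create temporary table t1 (a int, index(a));
  insert into t1 values ('1'),('2'),('3'),('4'),('5');
  analyze table t1;
  show index from t1;   # Cardinality for key `a` now reported as 5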