Commit aa84afb9
Authored Apr 30, 2005 by monty@mysql.com

    Merge bk-internal.mysql.com:/home/bk/mysql-4.1
    into mysql.com:/home/my/mysql-4.1

Parents: fbf31c4a 4b9f462d
Showing 16 changed files with 214 additions and 36 deletions (+214 -36):

  BitKeeper/etc/logging_ok               +2   -0
  include/my_sys.h                       +1   -0
  myisam/myisampack.c                    +7   -2
  myisammrg/myrg_open.c                  +1   -1
  mysql-test/r/group_by.result           +9   -0
  mysql-test/r/ndb_multi.result          +21  -1
  mysql-test/t/group_by.test             +7   -0
  mysql-test/t/ndb_multi.test            +24  -0
  mysys/my_getwd.c                       +22  -0
  ndb/include/ndbapi/NdbDictionary.hpp   +4   -1
  ndb/src/ndbapi/NdbDictionaryImpl.cpp   +7   -4
  sql/ha_myisammrg.cc                    +24  -4
  sql/ha_ndbcluster.cc                   +77  -20
  sql/ha_ndbcluster.h                    +2   -2
  sql/protocol.cc                        +1   -1
  sql/sql_select.cc                      +5   -0

BitKeeper/etc/logging_ok
@@ -45,6 +45,8 @@ dlenev@build.mysql.com
 dlenev@jabberwock.localdomain
 dlenev@mysql.com
 ejonore@mc03.ndb.mysql.com
+evgen@moonbone.(none)
+evgen@moonbone.local
 gbichot@quadita2.mysql.com
 gbichot@quadxeon.mysql.com
 georg@beethoven.local

include/my_sys.h
@@ -609,6 +609,7 @@ extern uint dirname_part(my_string to,const char *name);
 extern uint dirname_length(const char *name);
 #define base_name(A) (A+dirname_length(A))
 extern int test_if_hard_path(const char *dir_name);
+extern my_bool has_path(const char *name);
 extern char *convert_dirname(char *to, const char *from, const char *from_end);
 extern void to_unix_path(my_string name);
 extern my_string fn_ext(const char *name);

myisam/myisampack.c
@@ -31,6 +31,7 @@
 #define __GNU_LIBRARY__        /* Skip warnings in getopt.h */
 #endif
 #include <my_getopt.h>
+#include <assert.h>
 #if INT_MAX > 32767
 #define BITS_SAVED 32
@@ -1991,7 +1992,9 @@ static void write_bits (register ulong value, register uint bits)
 {
   reg3 uint byte_buff;
   bits= (uint) -file_buffer.bits;
-  byte_buff= file_buffer.current_byte | (uint) (value >> bits);
+  DBUG_ASSERT(bits <= 8 * sizeof(value));
+  byte_buff= (file_buffer.current_byte |
+              ((bits != 8 * sizeof(value)) ? (uint) (value >> bits) : 0));
 #if BITS_SAVED == 32
   *file_buffer.pos++= (byte) (byte_buff >> 24) ;
   *file_buffer.pos++= (byte) (byte_buff >> 16) ;
@@ -1999,7 +2002,9 @@ static void write_bits (register ulong value, register uint bits)
   *file_buffer.pos++= (byte) (byte_buff >> 8) ;
   *file_buffer.pos++= (byte) byte_buff;
-  value&= (1 << bits) - 1;
+  DBUG_ASSERT(bits <= 8 * sizeof(ulong));
+  if (bits != 8 * sizeof(value))
+    value&= (((ulong) 1) << bits) - 1;
 #if BITS_SAVED == 16
   if (bits >= sizeof(uint))
   {
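
Both hunks above guard against shifting by the full bit width of the operand: when bits equals 8*sizeof(value), expressions like value >> bits and 1 << bits are undefined behaviour in C. A minimal standalone sketch of the same guard (illustrative only; high_part is a hypothetical helper, not myisampack code):

    #include <cassert>
    #include <cstdio>

    // Shifting a value by its full bit width is undefined behaviour,
    // so the shift is skipped when 'bits' covers the whole operand.
    static unsigned int high_part(unsigned long value, unsigned int bits)
    {
      assert(bits <= 8 * sizeof(value));
      return (bits != 8 * sizeof(value)) ? (unsigned int) (value >> bits) : 0;
    }

    int main()
    {
      std::printf("%u\n", high_part(0xFFFFFFFFUL, 8));                         // 16777215 (top bits)
      std::printf("%u\n", high_part(0xFFFFFFFFUL, 8 * sizeof(unsigned long))); // 0, shift skipped
      return 0;
    }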

myisammrg/myrg_open.c
@@ -80,7 +80,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
       continue;        /* Skip comments */
     }
-    if (!test_if_hard_path(buff))
+    if (!has_path(buff))
     {
       VOID(strmake(name_buff+dir_length,buff,
                    sizeof(name_buff)-1-dir_length));

mysql-test/r/group_by.result
@@ -702,3 +702,12 @@ c
 val-74
 val-98
 drop table t1,t2;
+create table t1 (b int4 unsigned not null);
+insert into t1 values(3000000000);
+select * from t1;
+b
+3000000000
+select min(b) from t1;
+min(b)
+3000000000
+drop table t1;

mysql-test/r/ndb_multi.result
@@ -13,6 +13,26 @@ a
 show status like 'handler_discover%';
 Variable_name	Value
 Handler_discover	0
+select * from t1;
+a
+2
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+a
+2
+show status like 'handler_discover%';
+Variable_name	Value
+Handler_discover	0
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+select * from t1;
+ERROR HY000: Got error 241 'Invalid schema object version' from ndbcluster
+select * from t1;
+a
+2
 flush status;
 select * from t1;
 a
@@ -20,7 +40,7 @@ a
 update t1 set a=3 where a=2;
 show status like 'handler_discover%';
 Variable_name	Value
-Handler_discover	1
+Handler_discover	0
 create table t3 (a int not null primary key, b varchar(22),
 c int, last_col text) engine=ndb;
 insert into t3 values(1, 'Hi!', 89, 'Longtext column');

mysql-test/t/group_by.test
@@ -515,3 +515,10 @@ explain select c from t2 where a = 2 and b = 'val-2' group by c;
 select c from t2 where a = 2 and b = 'val-2' group by c;
 drop table t1,t2;
+# Test for BUG#9298 "Wrong handling of int4 unsigned columns in GROUP functions"
+# (the actual problem was with protocol code, not GROUP BY)
+create table t1 (b int4 unsigned not null);
+insert into t1 values(3000000000);
+select * from t1;
+select min(b) from t1;
+drop table t1;

mysql-test/t/ndb_multi.test
@@ -18,6 +18,30 @@ select * from t1;
 select * from t2;
 show status like 'handler_discover%';
+# Check dropping and recreating table on same server
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+connection con1;
+select * from t1;
+connection con2;
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+connection con1;
+select * from t1;
+
+# Check dropping and recreating table on different server
+connection server2;
+show status like 'handler_discover%';
+drop table t1;
+create table t1 (a int) engine=ndbcluster;
+insert into t1 value (2);
+connection server1;
+# Currently a retry is required remotely
+--error 1296
+select * from t1;
+select * from t1;

 # Connect to server2 and use the tables from there
 connection server2;
 flush status;

mysys/my_getwd.c
@@ -192,3 +192,25 @@ int test_if_hard_path(register const char *dir_name)
   return FALSE;
 #endif
 } /* test_if_hard_path */
+
+
+/*
+  Test if a name contains an (absolute or relative) path.
+
+  SYNOPSIS
+    has_path()
+    name                The name to test.
+
+  RETURN
+    TRUE        name contains a path.
+    FALSE       name does not contain a path.
+*/
+
+my_bool has_path(const char *name)
+{
+  return test(strchr(name, FN_LIBCHAR))
+#ifdef FN_DEVCHAR
+         || test(strchr(name, FN_DEVCHAR))
+#endif
+         ;
+}
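
has_path() reports whether a name contains any path component, while test_if_hard_path() only detects absolute ("hard") paths; that difference is why myrg_open.c above now leaves relative .MRG entries intact. A standalone sketch of the check, assuming FN_LIBCHAR is '/' (the real helper also checks FN_DEVCHAR where it is defined; this is not the MySQL source itself):

    #include <cstring>
    #include <cstdio>

    // Standalone sketch of the new helper.
    static bool has_path(const char *name)
    {
      return std::strchr(name, '/') != nullptr;
    }

    int main()
    {
      std::printf("%d\n", has_path("t1"));          // 0: plain table name
      std::printf("%d\n", has_path("./db2/t2"));    // 1: relative path
      std::printf("%d\n", has_path("/data/db/t3")); // 1: absolute path
      return 0;
    }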

ndb/include/ndbapi/NdbDictionary.hpp
@@ -75,8 +75,11 @@ public:
     Changed,    ///< The object has been modified in memory
                 ///< and has to be commited in NDB Kernel for
                 ///< changes to take effect
-    Retrieved   ///< The object exist and has been read
+    Retrieved,  ///< The object exist and has been read
                 ///< into main memory from NDB Kernel
+    Invalid     ///< The object has been invalidated
+                ///< and should not be used
   };

   /**

ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1448,6 +1448,7 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
     // If in local cache it must be in global
     if (!cachedImpl)
       abort();
+    cachedImpl->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(cachedImpl);
     m_globalHash->unlock();
   }
@@ -1747,8 +1748,8 @@ NdbDictionaryImpl::dropTable(const char * name)
     DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName));
     m_localHash.drop(internalTableName);
     m_globalHash->lock();
+    tab->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(tab);
     m_globalHash->unlock();
     DBUG_RETURN(dropTable(name));
@@ -1793,9 +1794,10 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
   if(ret == 0 || m_error.code == 709){
     const char * internalTableName = impl.m_internalName.c_str();

     m_localHash.drop(internalTableName);
     m_globalHash->lock();
+    impl.m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(&impl);
     m_globalHash->unlock();
@@ -1889,6 +1891,7 @@ NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
   m_localHash.drop(internalTableName);
   m_globalHash->lock();
+  impl.m_status = NdbDictionary::Object::Invalid;
   m_globalHash->drop(&impl);
   m_globalHash->unlock();
   return 0;
@@ -2152,8 +2155,8 @@ NdbDictionaryImpl::dropIndex(const char * indexName,
     m_ndb.internalizeTableName(indexName); // Index is also a table
     m_localHash.drop(internalIndexName);
     m_globalHash->lock();
+    idx->m_table->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(idx->m_table);
     m_globalHash->unlock();
     return dropIndex(indexName, tableName);
@@ -2187,8 +2190,8 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
   int ret = m_receiver.dropIndex(impl, *timpl);
   if(ret == 0){
     m_localHash.drop(internalIndexName);
     m_globalHash->lock();
+    impl.m_table->m_status = NdbDictionary::Object::Invalid;
     m_globalHash->drop(impl.m_table);
     m_globalHash->unlock();
   }

sql/ha_myisammrg.cc
@@ -381,6 +381,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
   char buff[FN_REFLEN], **table_names, **pos;
   TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first;
   THD *thd= current_thd;
+  uint dirlgt= dirname_length(name);
   DBUG_ENTER("ha_myisammrg::create");

   if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)*
@@ -394,10 +395,29 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
     tbl= find_temporary_table(thd, tables->db, tables->real_name);
     if (!tbl)
     {
-      uint length= my_snprintf(buff, FN_REFLEN, "%s%s/%s",
-                               mysql_real_data_home,
+      /*
+        Construct the path to the MyISAM table. Try to meet two conditions:
+        1.) Allow to include MyISAM tables from different databases, and
+        2.) allow for moving DATADIR around in the file system.
+        The first means that we need paths in the .MRG file. The second
+        means that we should not have absolute paths in the .MRG file.
+        The best, we can do, is to use 'mysql_data_home', which is '.'
+        in mysqld and may be an absolute path in an embedded server.
+        This means that it might not be possible to move the DATADIR of
+        an embedded server without changing the paths in the .MRG file.
+      */
+      uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s",
+                               mysql_data_home,
                                tables->db, tables->real_name);
-      if (!(table_name= thd->strmake(buff, length)))
+      /*
+        If a MyISAM table is in the same directory as the MERGE table,
+        we use the table name without a path. This means that the
+        DATADIR can easily be moved even for an embedded server as long
+        as the MyISAM tables are from the same database as the MERGE table.
+      */
+      if ((dirname_length(buff) == dirlgt) && ! memcmp(buff, name, dirlgt))
+        table_name= tables->real_name;
+      else if (!(table_name= thd->strmake(buff, length)))
         DBUG_RETURN(HA_ERR_OUT_OF_MEM);
     }
     else
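
The new comment block spells out the trade-off: entries in the .MRG file should stay relative so DATADIR can move, and a MyISAM table living in the same directory as the MERGE table is stored by bare name. A rough standalone sketch of that decision (illustrative only; mrg_entry is a hypothetical helper, not server code, and mysql_data_home is assumed to be "."):

    #include <cstdio>
    #include <string>

    // Returns the entry that would be written into the .MRG file,
    // assuming both paths contain at least one '/'.
    static std::string mrg_entry(const std::string &merge_path,   // e.g. "./db1/mrg_table"
                                 const std::string &db,
                                 const std::string &table)
    {
      std::string full= "./" + db + "/" + table;        // mysql_data_home is '.' in mysqld
      std::string::size_type merge_dir= merge_path.rfind('/') + 1;
      std::string::size_type full_dir= full.rfind('/') + 1;
      // Same directory as the MERGE table: keep only the table name,
      // so DATADIR stays movable. Otherwise keep the relative path.
      if (merge_dir == full_dir &&
          full.compare(0, full_dir, merge_path, 0, merge_dir) == 0)
        return table;
      return full;
    }

    int main()
    {
      std::printf("%s\n", mrg_entry("./db1/mrg_table", "db1", "t1").c_str()); // "t1"
      std::printf("%s\n", mrg_entry("./db1/mrg_table", "db2", "t2").c_str()); // "./db2/t2"
      return 0;
    }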

sql/ha_ndbcluster.cc
@@ -331,11 +331,28 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
 #    The mapped error code
 */

-void ha_ndbcluster::invalidateDictionaryCache()
+void ha_ndbcluster::invalidate_dictionary_cache(bool global)
 {
   NDBDICT *dict= get_ndb()->getDictionary();
+  DBUG_ENTER("invalidate_dictionary_cache");
   DBUG_PRINT("info", ("invalidating %s", m_tabname));
-  dict->invalidateTable(m_tabname);
+
+  if (global)
+  {
+    const NDBTAB *tab= dict->getTable(m_tabname);
+    if (!tab)
+      DBUG_VOID_RETURN;
+    if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+    {
+      // Global cache has already been invalidated
+      dict->removeCachedTable(m_tabname);
+      global= FALSE;
+    }
+    else
+      dict->invalidateTable(m_tabname);
+  }
+  else
+    dict->removeCachedTable(m_tabname);
   table->version=0L;  /* Free when thread is ready */
   /* Invalidate indexes */
   for (uint i= 0; i < table->keys; i++)
@@ -347,18 +364,28 @@ void ha_ndbcluster::invalidateDictionaryCache()
     switch (idx_type) {
     case (PRIMARY_KEY_ORDERED_INDEX):
     case (ORDERED_INDEX):
+      if (global)
       dict->invalidateIndex(index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(index->getName(), m_tabname);
       break;
     case (UNIQUE_ORDERED_INDEX):
+      if (global)
       dict->invalidateIndex(index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(index->getName(), m_tabname);
     case (UNIQUE_INDEX):
+      if (global)
       dict->invalidateIndex(unique_index->getName(), m_tabname);
+      else
+        dict->removeCachedIndex(unique_index->getName(), m_tabname);
       break;
     case (PRIMARY_KEY_INDEX):
     case (UNDEFINED_INDEX):
       break;
     }
   }
+  DBUG_VOID_RETURN;
 }

 int ha_ndbcluster::ndb_err(NdbConnection *trans)
@@ -371,7 +398,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
   switch (err.classification) {
   case NdbError::SchemaError:
   {
-    invalidateDictionaryCache();
+    invalidate_dictionary_cache(TRUE);
     if (err.code == 284)
     {
@@ -765,9 +792,16 @@ int ha_ndbcluster::get_metadata(const char *path)
   const void *data, *pack_data;
   uint length, pack_length;

   if (!(tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
-  DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+  // Check if thread has stale local cache
+  if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+  {
+    invalidate_dictionary_cache(FALSE);
+    if (!(tab= dict->getTable(m_tabname)))
+      ERR_RETURN(dict->getNdbError());
+    DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+  }
   /*
     Compare FrmData in NDB with frm file from disk.
   */
@@ -786,7 +820,7 @@ int ha_ndbcluster::get_metadata(const char *path)
       if (!invalidating_ndb_table)
       {
         DBUG_PRINT("info", ("Invalidating table"));
-        invalidateDictionaryCache();
+        invalidate_dictionary_cache(TRUE);
         invalidating_ndb_table= TRUE;
       }
       else
@@ -812,7 +846,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   if (error)
     DBUG_RETURN(error);

-  m_tableVersion= tab->getObjectVersion();
+  m_table_version= tab->getObjectVersion();
   m_table= (void*)tab;
   m_table_info= NULL;   // Set in external lock
@@ -3226,15 +3260,25 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
       void *tab_info;
       if (!(tab= dict->getTable(m_tabname, &tab_info)))
         ERR_RETURN(dict->getNdbError());
-      DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
-      if (m_table != (void *)tab || m_tableVersion != tab->getObjectVersion())
+      DBUG_PRINT("info", ("Table schema version: %d",
+                          tab->getObjectVersion()));
+      // Check if thread has stale local cache
+      if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+      {
+        invalidate_dictionary_cache(FALSE);
+        if (!(tab= dict->getTable(m_tabname, &tab_info)))
+          ERR_RETURN(dict->getNdbError());
+        DBUG_PRINT("info", ("Table schema version: %d",
+                            tab->getObjectVersion()));
+      }
+      if (m_table != (void *)tab || m_table_version < tab->getObjectVersion())
       {
         /*
           The table has been altered, refresh the index list
         */
         build_index_list(ndb, table, ILBP_OPEN);
         m_table= (void *)tab;
-        m_tableVersion = tab->getObjectVersion();
+        m_table_version= tab->getObjectVersion();
       }
       m_table_info= tab_info;
@@ -3260,7 +3304,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         thd->transaction.stmt.ndb_tid= 0;
       }
     }
-    m_table= NULL;
     m_table_info= NULL;

     /*
       This is the place to make sure this handler instance
@@ -3882,7 +3925,13 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
   dict= ndb->getDictionary();
   if (!(orig_tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
+  // Check if thread has stale local cache
+  if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+  {
+    dict->removeCachedTable(m_tabname);
+    if (!(orig_tab= dict->getTable(m_tabname)))
+      ERR_RETURN(dict->getNdbError());
+  }
   m_table= (void *)orig_tab;
   // Change current database to that of target table
   set_dbname(to);
@@ -4006,7 +4055,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_active_trans(NULL),
   m_active_cursor(NULL),
   m_table(NULL),
-  m_tableVersion(-1),
+  m_table_version(-1),
   m_table_info(NULL),
   m_table_flags(HA_REC_NOT_IN_SEQ |
                 HA_NULL_IN_KEY |
@@ -4250,7 +4299,6 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
       DBUG_RETURN(1);
     ERR_RETURN(err);
   }
-
   DBUG_PRINT("info", ("Found table %s", tab->getName()));
   len= tab->getFrmLength();
@@ -4327,6 +4375,7 @@ int ndbcluster_drop_database(const char *path)
   uint i;
   char *tabname;
   List<char> drop_list;
+  int ret= 0;
   ha_ndbcluster::set_dbname(path, (char *)&dbname);
   DBUG_PRINT("enter", ("db: %s", dbname));
@@ -4353,10 +4402,18 @@ int ndbcluster_drop_database(const char *path)
   ndb->setDatabaseName(dbname);
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
+  {
     if (dict->dropTable(tabname))
-      ERR_RETURN(dict->getNdbError());
-  DBUG_RETURN(0);
+    {
+      const NdbError err= dict->getNdbError();
+      if (err.code != 709)
+      {
+        ERR_PRINT(err);
+        ret= ndb_to_mysql_error(&err);
+      }
+    }
+  }
+  DBUG_RETURN(ret);
 }

sql/ha_ndbcluster.h
@@ -203,7 +203,7 @@ class ha_ndbcluster: public handler
   void print_results();
   longlong get_auto_increment();
-  void invalidateDictionaryCache();
+  void invalidate_dictionary_cache(bool global);
   int ndb_err(NdbConnection*);
   bool uses_blob_value(bool all_fields);
@@ -215,7 +215,7 @@ class ha_ndbcluster: public handler
   NdbConnection *m_active_trans;
   NdbResultSet *m_active_cursor;
   void *m_table;
-  int m_tableVersion;
+  int m_table_version;
   void *m_table_info;
   char m_dbname[FN_HEADLEN];
   //char m_schemaname[FN_HEADLEN];

sql/protocol.cc
@@ -810,7 +810,7 @@ bool Protocol_simple::store_long(longlong from)
 #endif
   char buff[20];
   return net_store_data((char*) buff,
-                        (uint) (int10_to_str((int) from, buff, -10) - buff));
+                        (uint) (int10_to_str((int) from, buff, (from < 0) ? -10 : 10) - buff));
 }
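
This one-line change is the actual fix behind the BUG#9298 test above: store_long() always converted with a signed radix (-10), so an unsigned INT value such as 3000000000 was sent to the client through a signed interpretation. A minimal standalone sketch of the difference (plain C++, not the int10_to_str implementation; assumes 32-bit int and a two's-complement platform):

    #include <cstdio>

    int main()
    {
      // 3000000000 does not fit in a signed 32-bit int; printing the same
      // value as signed is what the old radix of -10 effectively did.
      unsigned int stored= 3000000000u;
      std::printf("as signed:   %d\n", (int) stored);   // -1294967296
      std::printf("as unsigned: %u\n", stored);         // 3000000000
      return 0;
    }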

sql/sql_select.cc
@@ -6893,6 +6893,11 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
 					    &join->tmp_table_param,
 					    error, 0))
 	  DBUG_RETURN(-1);
+	/*
+	  If table->file->write_row() was failed because of 'out of memory'
+	  and tmp table succesfully created, reset error.
+	*/
+	error= 0;
       }
       if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
       {