Commit 696c7677 authored Mar 08, 2006 by tomas@poseidon.ndb.mysql.com
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new
parents 3c1f4e3c 0eb12caf
Showing 5 changed files with 82 additions and 37 deletions
mysql-test/r/ndb_lock.result                      +11  -0
mysql-test/t/ndb_lock.test                        +36  -0
sql/ha_ndbcluster.cc                              +32 -33
sql/ha_ndbcluster.h                                +1  -4
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp   +2  -0
mysql-test/r/ndb_lock.result
@@ -63,3 +63,14 @@ pk u o
 5 5 5
 insert into t1 values (1,1,1);
 drop table t1;
+create table t3 (id2 int) engine=ndb;
+lock tables t3 write;
+unlock tables;
+drop table t3;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+lock tables t3 read;
+delete t2 from t2, t3 where t2.id = t3.id;
+unlock tables;
+drop table t2, t3;
mysql-test/t/ndb_lock.test
@@ -70,3 +70,39 @@ insert into t1 values (1,1,1);
 drop table t1;
 
 # End of 4.1 tests
+
+#
+# Bug #17812 Previous lock table for write causes "stray" lock
+# although table is recreated
+#
+# this creating, locking, and dropping causes a subsequent hang
+# on the delete below waiting for table t2 the locking in the
+# "other" connection is relevant, as without it there is no problem
+#
+connection con1;
+create table t3 (id2 int) engine=ndb;
+
+connection con2;
+lock tables t3 write;
+unlock tables;
+
+connection con1;
+drop table t3;
+
+connection con1;
+create table t2 (id int, j int) engine=ndb;
+insert into t2 values (2, 2);
+create table t3 (id int) engine=ndb;
+
+connection con2;
+lock tables t3 read;
+
+connection con1;
+# here we get a hang before bugfix although we shouldn't
+delete t2 from t2, t3 where t2.id = t3.id;
+
+connection con2;
+unlock tables;
+
+connection con1;
+drop table t2, t3;
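The mysqltest script above drives the scenario through two client sessions (con1 and con2). For readers who want to reproduce it outside the mysqltest harness, a rough equivalent using the MySQL C API could look like the sketch below. It is not part of this commit; the connection parameters (host, user, password, database, port) are placeholders, and a running mysqld with an NDB cluster attached is assumed.

// Rough two-session reproduction of the scenario scripted above, using the
// MySQL C API instead of the mysqltest harness (not part of this commit).
// Build e.g.: g++ bug17812_repro.cc $(mysql_config --cflags --libs)
#include <mysql.h>
#include <cstdio>
#include <cstdlib>

// Connection parameters are placeholders; adjust for your setup.
static MYSQL *connect_session()
{
  MYSQL *con= mysql_init(NULL);
  if (!mysql_real_connect(con, "127.0.0.1", "root", "", "test", 3306, NULL, 0))
  {
    std::fprintf(stderr, "connect failed: %s\n", mysql_error(con));
    std::exit(1);
  }
  return con;
}

static void run(MYSQL *con, const char *sql)
{
  if (mysql_query(con, sql))
    std::fprintf(stderr, "'%s' failed: %s\n", sql, mysql_error(con));
}

int main()
{
  MYSQL *con1= connect_session();   // plays the role of con1 in the test
  MYSQL *con2= connect_session();   // plays the role of con2 in the test

  run(con1, "create table t3 (id2 int) engine=ndb");
  run(con2, "lock tables t3 write");
  run(con2, "unlock tables");
  run(con1, "drop table t3");

  run(con1, "create table t2 (id int, j int) engine=ndb");
  run(con1, "insert into t2 values (2, 2)");
  run(con1, "create table t3 (id int) engine=ndb");
  run(con2, "lock tables t3 read");
  // Before the fix for Bug#17812 this DELETE hung waiting for table t2.
  run(con1, "delete t2 from t2, t3 where t2.id = t3.id");
  run(con2, "unlock tables");
  run(con1, "drop table t2, t3");

  mysql_close(con1);
  mysql_close(con2);
  return 0;
}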
sql/ha_ndbcluster.cc
@@ -466,60 +466,58 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
     #   The mapped error code
 */
-int ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
-                                               const char *dbname,
-                                               const char *tabname, bool global)
+int ha_ndbcluster::invalidate_dictionary_cache(bool global)
 {
-  NDBDICT *dict= ndb->getDictionary();
+  NDBDICT *dict= get_ndb()->getDictionary();
   DBUG_ENTER("invalidate_dictionary_cache");
-  DBUG_PRINT("info", ("invalidating %s", tabname));
-#ifdef HAVE_NDB_BINLOG
-  char key[FN_REFLEN];
-  build_table_filename(key, sizeof(key), dbname, tabname, "");
-  DBUG_PRINT("info", ("Getting ndbcluster mutex"));
-  pthread_mutex_lock(&ndbcluster_mutex);
-  NDB_SHARE *ndb_share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                                 (byte*) key, strlen(key));
-  pthread_mutex_unlock(&ndbcluster_mutex);
-  DBUG_PRINT("info", ("Released ndbcluster mutex"));
-  // Only binlog_thread is allowed to globally invalidate a table
-  if (global && ndb_share && ndb_share->op && (current_thd != injector_thd))
-    DBUG_RETURN(1);
-#endif
+  DBUG_PRINT("info", ("m_tabname: %s global: %d", m_tabname, global));
   if (global)
   {
-    const NDBTAB *tab= dict->getTable(tabname);
+#ifdef HAVE_NDB_BINLOG
+    if (current_thd != injector_thd)
+    {
+      char key[FN_REFLEN];
+      build_table_filename(key, sizeof(key), m_dbname, m_tabname, "");
+      DBUG_PRINT("info", ("Getting ndbcluster mutex"));
+      pthread_mutex_lock(&ndbcluster_mutex);
+      NDB_SHARE *ndb_share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+                                                     (byte*) key, strlen(key));
+      // Only binlog_thread is allowed to globally invalidate a table
+      if (ndb_share && ndb_share->op)
+      {
+        pthread_mutex_unlock(&ndbcluster_mutex);
+        DBUG_PRINT("info", ("Released ndbcluster mutex"));
+        DBUG_RETURN(1);
+      }
+      pthread_mutex_unlock(&ndbcluster_mutex);
+      DBUG_PRINT("info", ("Released ndbcluster mutex"));
+    }
+#endif
+    const NDBTAB *tab= dict->getTable(m_tabname);
     if (!tab)
       DBUG_RETURN(1);
     if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
     {
       // Global cache has already been invalidated
-      dict->removeCachedTable(tabname);
+      dict->removeCachedTable(m_tabname);
       global= FALSE;
       DBUG_PRINT("info", ("global: %d", global));
     }
     else
-      dict->invalidateTable(tabname);
+      dict->invalidateTable(m_tabname);
+    table_share->version= 0L;  /* Free when thread is ready */
   }
   else
-    dict->removeCachedTable(tabname);
-  share->version= 0L;  /* Free when thread is ready */
-  DBUG_RETURN(0);
-}
+    dict->removeCachedTable(m_tabname);
 
-void ha_ndbcluster::invalidate_dictionary_cache(bool global)
-{
-  NDBDICT *dict= get_ndb()->getDictionary();
-  if (invalidate_dictionary_cache(table_share, get_ndb(), m_dbname,
-                                  m_tabname, global))
-    return;
   /* Invalidate indexes */
   for (uint i= 0; i < table_share->keys; i++)
   {
     NDBINDEX *index = (NDBINDEX *) m_index[i].index;
     NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
-    if (!index && !unique_index) continue;
+    if (!index && !unique_index) continue;
     NDB_INDEX_TYPE idx_type= m_index[i].type;
     switch (idx_type) {
@@ -546,6 +544,7 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global)
       break;
     }
   }
+  DBUG_RETURN(0);
 }
 
 int ha_ndbcluster::ndb_err(NdbTransaction *trans)
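The core of the ha_ndbcluster.cc change is the guard inside the merged invalidate_dictionary_cache(bool global): look up the NDB_SHARE under ndbcluster_mutex and return early when the binlog injector still holds an event operation on the table, unless the caller is injector_thd itself. The sketch below is not MySQL/NDB code, just a minimal self-contained illustration of that lock-then-check-ownership pattern with pthreads; the names registry_mutex, owner_by_table and invalidate_entry are hypothetical stand-ins for ndbcluster_mutex, ndbcluster_open_tables / NDB_SHARE::op and the handler method.

#include <pthread.h>
#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-ins for ndbcluster_mutex / ndbcluster_open_tables /
// NDB_SHARE::op / injector_thd -- this is NOT MySQL code.
static pthread_mutex_t registry_mutex = PTHREAD_MUTEX_INITIALIZER;
static std::map<std::string, pthread_t> owner_by_table; // table name -> thread that still owns it
static pthread_t injector_thread;                       // the one thread allowed to invalidate globally

// Return 1 and do nothing when another thread still owns the entry, mirroring
// the early DBUG_RETURN(1) added to invalidate_dictionary_cache(); otherwise
// drop the cached entry and return 0.
static int invalidate_entry(const std::string &name)
{
  pthread_mutex_lock(&registry_mutex);
  std::map<std::string, pthread_t>::iterator it = owner_by_table.find(name);
  if (it != owner_by_table.end() && !pthread_equal(pthread_self(), injector_thread))
  {
    pthread_mutex_unlock(&registry_mutex); // someone else owns it: bail out
    return 1;
  }
  owner_by_table.erase(name);              // safe to invalidate
  pthread_mutex_unlock(&registry_mutex);
  return 0;
}

int main()
{
  injector_thread = pthread_self();        // in this toy program we are the "injector"
  owner_by_table["t3"] = pthread_self();
  std::printf("invalidate t3 -> %d\n", invalidate_entry("t3")); // prints 0: we own it
  return 0;
}

The property mirrored from the patch is that the mutex is released on every path before returning, whether or not the cached entry is dropped.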
sql/ha_ndbcluster.h
@@ -685,9 +685,6 @@ static void set_tabname(const char *pathname, char *tabname);
   bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
-  static int invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
-                                         const char *dbname, const char *tabname,
-                                         bool global);
 private:
   friend int ndbcluster_drop_database_impl(const char *path);
@@ -774,7 +771,7 @@ private:
   void print_results();
   ulonglong get_auto_increment();
-  void invalidate_dictionary_cache(bool global);
+  int invalidate_dictionary_cache(bool global);
   int ndb_err(NdbTransaction*);
   bool uses_blob_value();
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -1252,12 +1252,14 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
      * Already completed GCI...
      * Possible in case of resend during NF handling
      */
 #ifdef VM_TRACE
+    ndbout << "bucket == 0, gci:" << gci
+           << " complete: " << m_complete_data << endl;
     for (Uint32 i = 0; i < m_active_gci.size(); i++)
     {
       ndbout << i << " - " << m_active_gci[i] << endl;
     }
 #endif
     DBUG_VOID_RETURN_EVENT;
   }