mariadb / Commits

Commit e01f587d
authored Aug 13, 2007 by jonas@perch.ndb.mysql.com
ndb - bug#28804
Handle out of transaction buffer in TC for INDX lookups
parent 3afa32dc

Showing 8 changed files with 299 additions and 55 deletions
ndb/src/kernel/blocks/ERROR_codes.txt      +5    -1
ndb/src/kernel/blocks/dbtc/Dbtc.hpp        +5    -5
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp    +122  -41
ndb/src/ndbapi/NdbTransaction.cpp          +32   -6
ndb/src/ndbapi/ndberror.c                  +2    -0
ndb/test/ndbapi/testIndex.cpp              +117  -0
ndb/test/run-test/daily-basic-tests.txt    +8    -0
sql/ha_ndbcluster.cc                       +8    -2
ndb/src/kernel/blocks/ERROR_codes.txt

@@ -6,7 +6,7 @@ Next DBTUP 4014
 Next DBLQH 5043
 Next DBDICT 6007
 Next DBDIH 7183
-Next DBTC 8039
+Next DBTC 8052
 Next CMVMI 9000
 Next BACKUP 10022
 Next DBUTIL 11002
@@ -296,6 +296,10 @@ ABORT OF TCKEYREQ
 8038 : Simulate API disconnect just after SCAN_TAB_REQ
+8039 : Simulate failure of TransactionBufferMemory allocation for OI lookup
+8051 : Simulate failure of allocation for saveINDXKEYINFO
 
 CMVMI
 -----
ndb/src/kernel/blocks/dbtc/Dbtc.hpp

@@ -1497,12 +1497,12 @@ private:
   void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
                             TcConnectRecord * const regTcPtr);
 
   // Trigger and index handling
-  bool saveINDXKEYINFO(Signal* signal,
+  int saveINDXKEYINFO(Signal* signal,
                        TcIndexOperation* indexOp,
                        const Uint32 *src,
                        Uint32 len);
   bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
-  bool saveINDXATTRINFO(Signal* signal,
+  int saveINDXATTRINFO(Signal* signal,
                         TcIndexOperation* indexOp,
                         const Uint32 *src,
                         Uint32 len);
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp

@@ -1789,9 +1789,18 @@ start_failure:
   }//switch
 }
 
+static
+inline
+bool
+compare_transid(Uint32* val0, Uint32* val1)
+{
+  Uint32 tmp0 = val0[0] ^ val1[0];
+  Uint32 tmp1 = val0[1] ^ val1[1];
+  return (tmp0 | tmp1) == 0;
+}
+
 void Dbtc::execKEYINFO(Signal* signal)
 {
   UintR compare_transid1, compare_transid2;
   jamEntry();
   apiConnectptr.i = signal->theData[0];
   tmaxData = 20;
@@ -1801,10 +1810,8 @@ void Dbtc::execKEYINFO(Signal* signal)
   }//if
   ptrAss(apiConnectptr, apiConnectRecord);
   ttransid_ptr = 1;
-  compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
-  compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
-  compare_transid1 = compare_transid1 | compare_transid2;
-  if (compare_transid1 != 0) {
+  if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false) {
     TCKEY_abort(signal, 19);
     return;
   }//if
@@ -2105,7 +2112,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
 void Dbtc::execATTRINFO(Signal* signal)
 {
-  UintR compare_transid1, compare_transid2;
   UintR Tdata1 = signal->theData[0];
   UintR Tlength = signal->length();
   UintR TapiConnectFilesize = capiConnectFilesize;
@@ -2120,17 +2126,13 @@ void Dbtc::execATTRINFO(Signal* signal)
     return;
   }//if
 
   UintR Tdata2 = signal->theData[1];
   UintR Tdata3 = signal->theData[2];
   ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
-  compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
-  compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
   apiConnectptr.p = regApiPtr;
-  compare_transid1 = compare_transid1 | compare_transid2;
-  if (compare_transid1 != 0) {
+  if (compare_transid(regApiPtr->transid, signal->theData+1) == false) {
     DEBUG("Drop ATTRINFO, wrong transid, lenght="<< Tlength
-          << " transid(" << hex << Tdata2 << ", " << Tdata3);
+          << " transid(" << hex << signal->theData[1] << ", " << signal->theData[2]);
     TCKEY_abort(signal, 19);
     return;
   }//if
@@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
   }
 }//Dbtc::execTC_COMMITREQ()
 
+/**
+ * TCROLLBACKREQ
+ *
+ * Format is:
+ *
+ * thedata[0] = apiconnectptr
+ * thedata[1] = transid[0]
+ * thedata[2] = transid[1]
+ * OPTIONAL thedata[3] = flags
+ *
+ * Flags:
+ *    0x1 = potentiallyBad data from API (try not to assert)
+ */
 void Dbtc::execTCROLLBACKREQ(Signal* signal)
 {
+  bool potentiallyBad= false;
   UintR compare_transid1, compare_transid2;
 
   jamEntry();
+
+  if (unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
+  {
+    ndbout_c("Trying to roll back potentially bad txn\n");
+    potentiallyBad= true;
+  }
+
   apiConnectptr.i = signal->theData[0];
   if (apiConnectptr.i >= capiConnectFilesize) {
     goto TC_ROLL_warning;
@@ -5547,11 +5570,13 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
 TC_ROLL_warning:
   jam();
+  if (likely(potentiallyBad==false))
     warningHandlerLab(signal, __LINE__);
   return;
 
 TC_ROLL_system_error:
   jam();
+  if (likely(potentiallyBad==false))
     systemErrorLab(signal, __LINE__);
   return;
 }//Dbtc::execTCROLLBACKREQ()
@@ -11559,6 +11584,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
     // This is a newly started transaction, clean-up
     releaseAllSeizedIndexOperations(regApiPtr);
 
+    regApiPtr->apiConnectstate = CS_STARTED;
     regApiPtr->transid[0] = tcIndxReq->transId1;
     regApiPtr->transid[1] = tcIndxReq->transId2;
   }//if
@@ -11599,20 +11625,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
   Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
   indexOp->expectedAttrInfo = attrLength;
   Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
-  if (saveINDXKEYINFO(signal,
-                      indexOp,
-                      dataPtr,
-                      includedIndexLength)) {
+
+  int ret;
+  if ((ret = saveINDXKEYINFO(signal,
+                             indexOp,
+                             dataPtr,
+                             includedIndexLength)) == 0)
+  {
     jam();
     // We have received all we need
     readIndexTable(signal, regApiPtr, indexOp);
     return;
   }
+  else if (ret == -1)
+  {
+    jam();
+    return;
+  }
+
   dataPtr += includedIndexLength;
   if (saveINDXATTRINFO(signal,
                        indexOp,
                        dataPtr,
-                       includedAttrLength)) {
+                       includedAttrLength) == 0) {
     jam();
     // We have received all we need
     readIndexTable(signal, regApiPtr, indexOp);
@@ -11715,13 +11750,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
   TcIndexOperationPtr indexOpPtr;
   TcIndexOperation* indexOp;
 
+  if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
+  {
+    TCKEY_abort(signal, 19);
+    return;
+  }
+
+  if (regApiPtr->apiConnectstate == CS_ABORTING)
+  {
+    jam();
+    return;
+  }
+
   if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
   {
     indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXKEYINFO(signal,
                         indexOp,
                         src,
-                        keyInfoLength)) {
+                        keyInfoLength) == 0) {
       jam();
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
@@ -11748,17 +11795,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
   TcIndexOperationPtr indexOpPtr;
   TcIndexOperation* indexOp;
 
+  if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
+  {
+    TCKEY_abort(signal, 19);
+    return;
+  }
+
+  if (regApiPtr->apiConnectstate == CS_ABORTING)
+  {
+    jam();
+    return;
+  }
+
   if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
   {
     indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXATTRINFO(signal,
                          indexOp,
                          src,
-                         attrInfoLength)) {
+                         attrInfoLength) == 0) {
       jam();
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
       return;
     }
     return;
   }
 }
@@ -11766,12 +11827,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
  * Save signal INDXKEYINFO
  * Return true if we have received all needed data
  */
-bool Dbtc::saveINDXKEYINFO(Signal* signal,
+int
+Dbtc::saveINDXKEYINFO(Signal* signal,
                       TcIndexOperation* indexOp,
                       const Uint32 *src,
                       Uint32 len)
 {
-  if (!indexOp->keyInfo.append(src, len)) {
+  if (ERROR_INSERTED(8039) || !indexOp->keyInfo.append(src, len)) {
     jam();
     // Failed to seize keyInfo, abort transaction
 #ifdef VM_TRACE
@@ -11781,15 +11843,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
     apiConnectptr.i = indexOp->connectionIndex;
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
     releaseIndexOperation(apiConnectptr.p, indexOp);
-    terrorCode = 4000;
+    terrorCode = 289;
+    if (TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+      apiConnectptr.p->m_exec_flag= 1;
     abortErrorLab(signal);
-    return false;
+    return -1;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
-    return true;
+    return 0;
   }
-  return false;
+  return 1;
 }
 
 bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
@@ -11801,12 +11865,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
  * Save signal INDXATTRINFO
  * Return true if we have received all needed data
 */
-bool Dbtc::saveINDXATTRINFO(Signal* signal,
+int
+Dbtc::saveINDXATTRINFO(Signal* signal,
                        TcIndexOperation* indexOp,
                        const Uint32 *src,
                        Uint32 len)
 {
-  if (!indexOp->attrInfo.append(src, len)) {
+  if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
     jam();
 #ifdef VM_TRACE
     ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
@@ -11814,15 +11879,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
     apiConnectptr.i = indexOp->connectionIndex;
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
     releaseIndexOperation(apiConnectptr.p, indexOp);
-    terrorCode = 4000;
+    terrorCode = 289;
+    if (TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+      apiConnectptr.p->m_exec_flag= 1;
     abortErrorLab(signal);
-    return false;
+    return -1;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
-    return true;
+    return 0;
   }
-  return false;
+  return 1;
 }
 
 bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
@@ -12006,6 +12073,9 @@ void Dbtc::execTCKEYREF(Signal* signal)
     tcIndxRef->transId[0] = tcKeyRef->transId[0];
     tcIndxRef->transId[1] = tcKeyRef->transId[1];
     tcIndxRef->errorCode = tcKeyRef->errorCode;
+
+    releaseIndexOperation(regApiPtr, indexOp);
+
     sendSignal(regApiPtr->ndbapiBlockref,
                GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
     return;
@@ -12538,7 +12608,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
 bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
                                TcIndexOperationPtr& indexOpPtr)
 {
-  return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
+  if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
+  {
+    ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
+    ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
+    ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
+    ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
+    ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
+    ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
+    return true;
+  }
+
+  return false;
 }
 
 void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
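The core of the DbtcMain.cpp change is the new return convention of saveINDXKEYINFO/saveINDXATTRINFO: instead of a bool, they now return 0 when all KEYINFO/ATTRINFO has arrived and the index read can proceed, 1 when more signals are still expected, and -1 when the transaction buffer could not be seized, in which case the transaction is aborted with the new error 289. The stand-alone sketch below is not part of the commit and every name in it is invented for illustration; it just shows the same tri-state convention in miniature:

// Illustration only: mirrors the 0 / 1 / -1 convention adopted above.
#include <cstdio>
#include <vector>

static int save_fragment(std::vector<unsigned>& buf,
                         const unsigned* src, unsigned len,
                         unsigned expected, bool simulate_out_of_buffer)
{
  if (simulate_out_of_buffer)
    return -1;                       // seize failed -> abort path (error 289)
  buf.insert(buf.end(), src, src + len);
  if (buf.size() >= expected)
    return 0;                        // all data received, operation can run
  return 1;                          // keep accumulating further signals
}

int main()
{
  std::vector<unsigned> key;
  const unsigned part1[] = { 1, 2, 3 };
  const unsigned part2[] = { 4, 5 };
  printf("%d\n", save_fragment(key, part1, 3, 5, false)); // prints 1
  printf("%d\n", save_fragment(key, part2, 2, 5, false)); // prints 0
  printf("%d\n", save_fragment(key, part2, 2, 5, true));  // prints -1
  return 0;
}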
ndb/src/ndbapi/NdbTransaction.cpp

@@ -481,12 +481,27 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
     while (1) {
       int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
       if (noOfComp == 0) {
-        /**
-         * This timeout situation can occur if NDB crashes.
+        /*
+         * Just for fun, this is only one of two places where
+         * we could hit this error... It's quite possible we
+         * hit it in Ndbif.cpp in Ndb::check_send_timeout()
+         *
+         * We behave rather similarly in both places.
+         * Hitting this is certainly a bug though...
          */
-        ndbout << "This timeout should never occur, execute(..)" << endl;
+        g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
+                            "response from NDB data nodes. This should NEVER "
+                            "occur. You have likely hit a NDB Bug. Please "
+                            "file a bug.");
         DBUG_PRINT("error",("This timeout should never occure, execute()"));
+        g_eventLogger.error("Forcibly trying to rollback txn (%p"
+                            ") to try to clean up data node resources.",
+                            this);
+        executeNoBlobs(NdbTransaction::Rollback);
         theError.code = 4012;
-        setOperationErrorCodeAbort(4012);  // Error code for "Cluster Failure"
+        theError.status= NdbError::PermanentError;
+        theError.classification= NdbError::TimeoutExpired;
+        setOperationErrorCodeAbort(4012); // ndbd timeout
         DBUG_RETURN(-1);
       }//if
@@ -550,6 +565,11 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
    */
   if (theError.code != 0)
     DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
+  /**
+   * for timeout (4012) we want sendROLLBACK to behave differently.
+   * Else, normal behaviour of reset errcode
+   */
+  if (theError.code != 4012)
     theError.code = 0;
   NdbScanOperation* tcOp = m_theFirstScanOperation;
   if (tcOp != 0){
@@ -873,6 +893,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
       tSignal.setData(theTCConPtr, 1);
       tSignal.setData(tTransId1, 2);
       tSignal.setData(tTransId2, 3);
+      if (theError.code == 4012)
+      {
+        g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
+        tSignal.setLength(tSignal.getLength() + 1); // + flags
+        tSignal.setData(0x1, 4); // potentially bad data
+      }
       tReturnCode = tp->sendSignal(&tSignal,theDBnode);
       if (tReturnCode != -1) {
         theSendStatus = sendTC_ROLLBACK;
ndb/src/ndbapi/ndberror.c

@@ -173,6 +173,8 @@ ErrorBundle ErrorCodes[] = {
   { 4022, TR, "Out of Send Buffer space in NDB API" },
   { 4032, TR, "Out of Send Buffer space in NDB API" },
+  { 288,  TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+  { 289,  TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
 
   /**
    * InsufficientSpace
    */
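Error 289 (and the 4012 timeout path hardened in NdbTransaction.cpp above) reach NDB API clients through the usual NdbError fields. Below is a minimal, hypothetical sketch of how an application might classify these codes; it is not part of the commit and assumes only the public NdbApi.hpp header, the NdbError type, and the configuration parameter names quoted in the error texts:

// Sketch only, not from the commit: decide whether an operation that
// failed with the given NdbError is worth retrying.
#include <NdbApi.hpp>

static bool worth_retrying(const NdbError& err)
{
  switch (err.code) {
  case 288:   // out of index operations in TC
              // -> consider raising MaxNoOfConcurrentIndexOperations
  case 289:   // out of transaction buffer memory in TC
              // -> retry with smaller batches or raise TransactionBufferMemory
    return true;
  case 4012:  // data node timeout; the API above already attempted a rollback
    return false;
  default:
    return err.status == NdbError::TemporaryError;
  }
}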
ndb/test/ndbapi/testIndex.cpp

@@ -1297,6 +1297,102 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
   return res;
 }
 
+int
+tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
+{
+  int result= NDBT_OK;
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+  const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
+
+  HugoOperations ops(*ctx->getTab(), idx);
+
+  g_err << "Using INDEX: " << pkIdxName << endl;
+
+  NdbRestarter restarter;
+
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+  const int batchsize = ctx->getProperty("BatchSize", 1);
+
+  for(int bs=1; bs < loops; bs++)
+  {
+    int c= 0;
+    while (c++ < loops)
+    {
+      g_err << "BS " << bs << " LOOP #" << c << endl;
+      g_err << "inserting error on op#" << c << endl;
+      CHECK(ops.startTransaction(pNdb) == 0);
+      for(int i=1; i<=c; i++)
+      {
+        if(i==c)
+        {
+          if(restarter.insertErrorInAllNodes(inject_err) != 0)
+          {
+            g_err << "**** FAILED to insert error" << endl;
+            result= NDBT_FAILED;
+            break;
+          }
+        }
+        CHECK(ops.indexReadRecords(pNdb, pkIdxName, i, false, 1) == 0);
+        if(i%bs==0 || i==c)
+        {
+          if(i<c)
+          {
+            if(ops.execute_NoCommit(pNdb, AO_IgnoreError) != NDBT_OK)
+            {
+              g_err << "**** executeNoCommit should have succeeded" << endl;
+              result= NDBT_FAILED;
+            }
+          }
+          else
+          {
+            if(ops.execute_NoCommit(pNdb, AO_IgnoreError) != 289)
+            {
+              g_err << "**** executeNoCommit should have failed with 289"
+                    << endl;
+              result= NDBT_FAILED;
+            }
+            g_err << "NdbError.code= "
+                  << ops.getTransaction()->getNdbError().code << endl;
+            break;
+          }
+        }
+      }
+
+      CHECK(ops.closeTransaction(pNdb) == 0);
+
+      if(restarter.insertErrorInAllNodes(0) != 0)
+      {
+        g_err << "**** Failed to error insert(0)" << endl;
+        return NDBT_FAILED;
+      }
+
+      CHECK(ops.startTransaction(pNdb) == 0);
+      if (ops.indexReadRecords(pNdb, pkIdxName, 0, 0, rows) != 0){
+        g_err << "**** Index read failed" << endl;
+        return NDBT_FAILED;
+      }
+      CHECK(ops.closeTransaction(pNdb) == 0);
+    }
+  }
+
+  return result;
+}
+
+int
+runBug28804(NDBT_Context* ctx, NDBT_Step* step)
+{
+  return tcSaveINDX_test(ctx, step, 8039);
+}
+
+int
+runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step)
+{
+  return tcSaveINDX_test(ctx, step, 8051);
+}
+
 NDBT_TESTSUITE(testIndex);
 TESTCASE("CreateAll",
          "Test that we can create all various indexes on each table\n"
@@ -1628,6 +1724,27 @@ TESTCASE("Bug25059",
   STEP(runBug25059);
   FINALIZER(createPkIndex_Drop);
 }
+TESTCASE("Bug28804",
+         "Test behaviour on out of TransactionBufferMemory for index lookup"){
+  TC_PROPERTY("LoggedIndexes", (unsigned)0);
+  INITIALIZER(runClearTable);
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug28804);
+  FINALIZER(createPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
+TESTCASE("Bug28804_ATTRINFO",
+         "Test behaviour on out of TransactionBufferMemory for index lookup"
+         " in saveINDXATTRINFO"){
+  TC_PROPERTY("LoggedIndexes", (unsigned)0);
+  INITIALIZER(runClearTable);
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug28804_ATTRINFO);
+  FINALIZER(createPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testIndex);
 
 int main(int argc, const char** argv){
ndb/test/run-test/daily-basic-tests.txt

@@ -779,3 +779,11 @@ cmd: DbAsyncGenerator
 args: -time 60 -p 1 -proc 25
 type: bench
 
+max-time: 180
+cmd: testIndex
+args: -n Bug28804 T1 T3
+
+max-time: 180
+cmd: testIndex
+args: -n Bug28804_ATTRINFO T1 T3
sql/ha_ndbcluster.cc

@@ -1758,9 +1758,15 @@ int ha_ndbcluster::unique_index_read(const byte *key,
   if (execute_no_commit_ie(this,trans,false) != 0)
   {
+    int err= ndb_err(trans);
+    if(err==HA_ERR_KEY_NOT_FOUND)
       table->status= STATUS_NOT_FOUND;
-    DBUG_RETURN(ndb_err(trans));
+    else
+      table->status= STATUS_GARBAGE;
+
+    DBUG_RETURN(err);
   }
 
   // The value have now been fetched from NDB
   unpack_record(buf);
   table->status= 0;