Commit 626c03c7 authored by tomas@whalegate.ndb.mysql.com

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new-ndb

into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb
parents d14cba10 dfc1e0cc
......@@ -331,6 +331,7 @@ Thd_ndb::Thd_ndb()
all= NULL;
stmt= NULL;
m_error= FALSE;
m_error_code= 0;
query_state&= NDB_QUERY_NORMAL;
options= 0;
(void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
......@@ -366,6 +367,7 @@ Thd_ndb::init_open_tables()
{
count= 0;
m_error= FALSE;
m_error_code= 0;
my_hash_reset(&open_tables);
}
......@@ -489,6 +491,7 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure()
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure");
get_thd_ndb(current_thd)->m_error= TRUE;
get_thd_ndb(current_thd)->m_error_code= 0;
DBUG_VOID_RETURN;
}
......@@ -2086,9 +2089,15 @@ int ha_ndbcluster::unique_index_read(const uchar *key,
if (execute_no_commit_ie(this,trans,FALSE) != 0 ||
op->getNdbError().code)
{
int err= ndb_err(trans);
if(err==HA_ERR_KEY_NOT_FOUND)
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
else
table->status= STATUS_GARBAGE;
DBUG_RETURN(err);
}
// The value have now been fetched from NDB
unpack_record(buf);
table->status= 0;
......
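Note on the unique_index_read() hunk above: the error path now distinguishes a plain "key not found" from any other failure, setting table->status to STATUS_NOT_FOUND in the first case and STATUS_GARBAGE otherwise, and returns the already mapped error instead of calling ndb_err() twice. The sketch below illustrates only that mapping; the enum values and the HA_ERR_KEY_NOT_FOUND placeholder are stand-ins, not the server's actual definitions.

```cpp
// Minimal, self-contained sketch of the status mapping introduced above.
// Constants are illustrative placeholders, not the real MySQL definitions.
#include <cstdio>

enum TableStatus { STATUS_OK = 0, STATUS_GARBAGE = 1, STATUS_NOT_FOUND = 2 };
const int HA_ERR_KEY_NOT_FOUND_SKETCH = 120;   // assumed placeholder value

TableStatus status_for_read_error(int err)
{
  // A missing key is an expected outcome: report "not found".
  // Any other error leaves the record buffer in an undefined state.
  return (err == HA_ERR_KEY_NOT_FOUND_SKETCH) ? STATUS_NOT_FOUND : STATUS_GARBAGE;
}

int main()
{
  printf("%d %d\n",
         status_for_read_error(HA_ERR_KEY_NOT_FOUND_SKETCH),  // prints 2
         status_for_read_error(4012));                         // prints 1
  return 0;
}
```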
......@@ -264,12 +264,13 @@ static void run_query(THD *thd, char *buf, char *end,
int i;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
for (i= 0; no_print_error[i]; i++)
if (thd_ndb->m_error == no_print_error[i])
if ((thd_ndb->m_error_code == no_print_error[i]) ||
(thd->net.last_errno == no_print_error[i]))
break;
if (!no_print_error[i])
sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d",
buf, thd->net.last_error, thd->net.last_errno,
thd_ndb->m_error,
thd_ndb->m_error_code,
thd->net.report_error, thd->query_error);
}
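For reference, the suppression list passed to run_query() is a plain zero-terminated array of error codes, and after this change an error is silenced if either the NDB-side code (thd_ndb->m_error_code) or the server-side code (thd->net.last_errno) appears in it. The following standalone sketch shows that lookup under assumed names; it is not the actual ha_ndbcluster_binlog code, and 1050 is only quoted as the usual value of ER_TABLE_EXISTS_ERROR.

```cpp
// Sketch of the zero-terminated suppression-list check used by run_query().
#include <cstdio>

static bool should_suppress(const int *no_print_error, int ndb_err, int server_err)
{
  for (int i = 0; no_print_error[i]; i++)
    if (no_print_error[i] == ndb_err || no_print_error[i] == server_err)
      return true;
  return false;
}

int main()
{
  const int no_print_error[4] = { 1050 /* e.g. ER_TABLE_EXISTS_ERROR */, 701, 4009, 0 };
  printf("%d\n", should_suppress(no_print_error, 701, 0));   // 1: suppressed
  printf("%d\n", should_suppress(no_print_error, 0, 1050));  // 1: suppressed
  printf("%d\n", should_suppress(no_print_error, 4012, 0));  // 0: still printed
  return 0;
}
```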
......@@ -779,7 +780,10 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd)
" end_pos BIGINT UNSIGNED NOT NULL, "
" PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
const int no_print_error[3]= {701, 4009, 0}; // do not print error 701 etc
const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
701,
4009,
0}; // do not print error 701 etc
run_query(thd, buf, end, no_print_error, TRUE);
DBUG_RETURN(0);
......@@ -836,7 +840,10 @@ static int ndbcluster_create_schema_table(THD *thd)
" type INT UNSIGNED NOT NULL,"
" PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");
const int no_print_error[3]= {701, 4009, 0}; // do not print error 701 etc
const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
701,
4009,
0}; // do not print error 701 etc
run_query(thd, buf, end, no_print_error, TRUE);
DBUG_RETURN(0);
......
......@@ -6,7 +6,7 @@ Next DBTUP 4029
Next DBLQH 5045
Next DBDICT 6008
Next DBDIH 7186
Next DBTC 8040
Next DBTC 8052
Next CMVMI 9000
Next BACKUP 10038
Next DBUTIL 11002
......@@ -304,6 +304,10 @@ ABORT OF TCKEYREQ
8038 : Simulate API disconnect just after SCAN_TAB_REQ
8039 : Simulate failure of TransactionBufferMemory allocation for OI lookup
8051 : Simulate failure of allocation for saveINDXKEYINFO
CMVMI
-----
......
......@@ -1499,12 +1499,12 @@ private:
void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
TcConnectRecord * const regTcPtr);
// Trigger and index handling
bool saveINDXKEYINFO(Signal* signal,
int saveINDXKEYINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
bool saveINDXATTRINFO(Signal* signal,
int saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
......
......@@ -1800,9 +1800,18 @@ start_failure:
}//switch
}
static
inline
bool
compare_transid(Uint32* val0, Uint32* val1)
{
Uint32 tmp0 = val0[0] ^ val1[0];
Uint32 tmp1 = val0[1] ^ val1[1];
return (tmp0 | tmp1) == 0;
}
void Dbtc::execKEYINFO(Signal* signal)
{
UintR compare_transid1, compare_transid2;
jamEntry();
apiConnectptr.i = signal->theData[0];
tmaxData = 20;
......@@ -1812,10 +1821,8 @@ void Dbtc::execKEYINFO(Signal* signal)
}//if
ptrAss(apiConnectptr, apiConnectRecord);
ttransid_ptr = 1;
compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false)
{
TCKEY_abort(signal, 19);
return;
}//if
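The compare_transid() helper introduced above replaces the repeated open-coded checks in execKEYINFO, execATTRINFO, execINDXKEYINFO and execINDXATTRINFO: two 32-bit transaction-id words match exactly when the OR of their XORs is zero, which is equivalent to comparing both words for equality but branch-free. A small self-contained check of that equivalence, assuming nothing beyond standard C++:

```cpp
// Standalone sketch of the compare_transid() idiom used in the diff above.
#include <cassert>
#include <cstdint>

static inline bool compare_transid(const uint32_t *val0, const uint32_t *val1)
{
  uint32_t tmp0 = val0[0] ^ val1[0];
  uint32_t tmp1 = val0[1] ^ val1[1];
  return (tmp0 | tmp1) == 0;   // zero only when both words are identical
}

int main()
{
  uint32_t a[2] = { 0xdeadbeef, 0x12345678 };
  uint32_t b[2] = { 0xdeadbeef, 0x12345678 };
  uint32_t c[2] = { 0xdeadbeef, 0x12345679 };
  assert(compare_transid(a, b));    // identical transaction ids
  assert(!compare_transid(a, c));   // second word differs
  return 0;
}
```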
......@@ -2116,7 +2123,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
void Dbtc::execATTRINFO(Signal* signal)
{
UintR compare_transid1, compare_transid2;
UintR Tdata1 = signal->theData[0];
UintR Tlength = signal->length();
UintR TapiConnectFilesize = capiConnectFilesize;
......@@ -2131,17 +2137,13 @@ void Dbtc::execATTRINFO(Signal* signal)
return;
}//if
UintR Tdata2 = signal->theData[1];
UintR Tdata3 = signal->theData[2];
ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
apiConnectptr.p = regApiPtr;
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
if (compare_transid(regApiPtr->transid, signal->theData+1) == false)
{
DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
<< " transid("<<hex<<Tdata2<<", "<<Tdata3);
<< " transid("<<hex<<signal->theData[1]<<", "<<signal->theData[2]);
TCKEY_abort(signal, 19);
return;
}//if
......@@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
}
}//Dbtc::execTC_COMMITREQ()
/**
* TCROLLBACKREQ
*
* Format is:
*
* thedata[0] = apiconnectptr
* thedata[1] = transid[0]
* thedata[2] = transid[1]
* OPTIONAL thedata[3] = flags
*
* Flags:
* 0x1 = potentiallyBad data from API (try not to assert)
*/
void Dbtc::execTCROLLBACKREQ(Signal* signal)
{
bool potentiallyBad= false;
UintR compare_transid1, compare_transid2;
jamEntry();
if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
{
ndbout_c("Trying to roll back potentially bad txn\n");
potentiallyBad= true;
}
apiConnectptr.i = signal->theData[0];
if (apiConnectptr.i >= capiConnectFilesize) {
goto TC_ROLL_warning;
......@@ -5547,11 +5570,13 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
TC_ROLL_warning:
jam();
if(likely(potentiallyBad==false))
warningHandlerLab(signal, __LINE__);
return;
TC_ROLL_system_error:
jam();
if(likely(potentiallyBad==false))
systemErrorLab(signal, __LINE__);
return;
}//Dbtc::execTCROLLBACKREQ()
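As the comment block above describes, TCROLLBACKREQ gains an optional fourth signal word of flags, where bit 0x1 marks a rollback of a "potentially bad" transaction so that TC downgrades the warning/system-error paths instead of asserting. The sketch below only mirrors that word layout with a plain array; it is not the real Signal class, and only the positions and the 0x1 flag are taken from the diff.

```cpp
// Illustrative layout of the extended TCROLLBACKREQ signal described above.
#include <cstdint>
#include <cstdio>

int main()
{
  uint32_t theData[4];
  theData[0] = 7;            // apiconnectptr
  theData[1] = 0xdeadbeef;   // transid[0]
  theData[2] = 0x12345678;   // transid[1]
  theData[3] = 0x1;          // optional flags: 0x1 = potentially bad data from API

  uint32_t signal_length = 4;  // 3 for the old format, 4 when flags are present

  bool potentiallyBad = (signal_length >= 4) && (theData[3] & 0x1);
  printf("potentiallyBad=%d\n", potentiallyBad);   // prints 1
  return 0;
}
```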
......@@ -11566,6 +11591,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
regApiPtr->apiConnectstate = CS_STARTED;
regApiPtr->transid[0] = tcIndxReq->transId1;
regApiPtr->transid[1] = tcIndxReq->transId2;
}//if
......@@ -11606,20 +11632,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
indexOp->expectedAttrInfo = attrLength;
Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
if (saveINDXKEYINFO(signal,
int ret;
if ((ret = saveINDXKEYINFO(signal,
indexOp,
dataPtr,
includedIndexLength)) {
includedIndexLength)) == 0)
{
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
else if (ret == -1)
{
jam();
return;
}
dataPtr += includedIndexLength;
if (saveINDXATTRINFO(signal,
indexOp,
dataPtr,
includedAttrLength)) {
includedAttrLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
......@@ -11722,13 +11757,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
{
TCKEY_abort(signal, 19);
return;
}
if (regApiPtr->apiConnectstate == CS_ABORTING)
{
jam();
return;
}
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXKEYINFO(signal,
indexOp,
src,
keyInfoLength)) {
keyInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
......@@ -11755,17 +11802,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
{
TCKEY_abort(signal, 19);
return;
}
if (regApiPtr->apiConnectstate == CS_ABORTING)
{
jam();
return;
}
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXATTRINFO(signal,
indexOp,
src,
attrInfoLength)) {
attrInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
return;
}
}
......@@ -11773,12 +11834,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
* Save signal INDXKEYINFO
* Return true if we have received all needed data
*/
bool Dbtc::saveINDXKEYINFO(Signal* signal,
int
Dbtc::saveINDXKEYINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len)
{
if (!indexOp->keyInfo.append(src, len)) {
if (ERROR_INSERTED(8039) || !indexOp->keyInfo.append(src, len)) {
jam();
// Failed to seize keyInfo, abort transaction
#ifdef VM_TRACE
......@@ -11788,15 +11850,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
terrorCode = 289;
if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
return false;
return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
return true;
return 0;
}
return false;
return 1;
}
bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
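With this change saveINDXKEYINFO (and saveINDXATTRINFO below) move from bool to a three-valued int: 0 means all KEYINFO/ATTRINFO has arrived and the index read can proceed, 1 means more signals are still expected, and -1 means the buffer append failed, the transaction was aborted with error 289, and the caller must stop processing the request. A hedged, standalone sketch of that convention, with illustrative names rather than the Dbtc implementation:

```cpp
// Sketch of the tri-state return convention adopted above:
//   0  -> everything received, proceed with readIndexTable()
//   1  -> keep accumulating, more signals expected
//  -1  -> allocation failed, transaction already aborted (error 289)
#include <cstdio>

static int save_index_data(bool append_ok, bool received_all)
{
  if (!append_ok)
    return -1;                        // abort was already sent, caller bails out
  return received_all ? 0 : 1;
}

int main()
{
  int ret = save_index_data(true, false);
  if (ret == 0)
    printf("read index table now\n");
  else if (ret == -1)
    printf("aborted, stop processing this request\n");
  else
    printf("wait for more KEYINFO/ATTRINFO\n");   // taken in this example
  return 0;
}
```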
......@@ -11808,12 +11872,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
* Save signal INDXATTRINFO
* Return true if we have received all needed data
*/
bool Dbtc::saveINDXATTRINFO(Signal* signal,
int
Dbtc::saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len)
{
if (!indexOp->attrInfo.append(src, len)) {
if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
jam();
#ifdef VM_TRACE
ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
......@@ -11821,15 +11886,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
terrorCode = 289;
if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
return false;
return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
return true;
return 0;
}
return false;
return 1;
}
bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
......@@ -12013,6 +12080,9 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
releaseIndexOperation(regApiPtr, indexOp);
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
return;
......@@ -12557,7 +12627,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
TcIndexOperationPtr& indexOpPtr)
{
return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
{
ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
return true;
}
return false;
}
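seizeIndexOperation() now asserts that the record handed out by the pool is pristine, i.e. that all buffers and expected-length counters were reset when the operation was last released. The following is a minimal sketch of that "assert the pooled record is clean on seize" pattern using a hypothetical pool and record type, not the Dbtc data structures.

```cpp
// Hypothetical pool illustrating the cleanliness asserts added above.
#include <cassert>
#include <vector>

struct IndexOp {
  unsigned expectedKeyInfo = 0;
  std::vector<unsigned> keyInfo;      // stands in for the section buffers
  unsigned expectedAttrInfo = 0;
  std::vector<unsigned> attrInfo;
};

struct IndexOpPool {
  std::vector<IndexOp> records = std::vector<IndexOp>(4);
  size_t next = 0;

  bool seize(IndexOp *&out)
  {
    if (next >= records.size()) return false;
    out = &records[next++];
    // A record coming out of the pool must have been fully reset on release;
    // asserting here catches leaked KEYINFO/ATTRINFO buffers early.
    assert(out->expectedKeyInfo == 0 && out->keyInfo.empty());
    assert(out->expectedAttrInfo == 0 && out->attrInfo.empty());
    return true;
  }
};

int main()
{
  IndexOpPool pool;
  IndexOp *op = nullptr;
  return pool.seize(op) ? 0 : 1;
}
```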
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
......
......@@ -1136,7 +1136,12 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count)
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->committedRead() == -1 ||
/*
* This was committedRead() before. However lock on main
* table tuple does not fully protect blob parts since DBTUP
* commits each tuple separately.
*/
tOp->readTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
......
......@@ -453,12 +453,27 @@ NdbTransaction::executeNoBlobs(NdbTransaction::ExecType aTypeOfExec,
while (1) {
int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
if (noOfComp == 0) {
/**
* This timeout situation can occur if NDB crashes.
/*
* Just for fun, this is only one of two places where
* we could hit this error... It's quite possible we
* hit it in Ndbif.cpp in Ndb::check_send_timeout()
*
* We behave rather similarly in both places.
* Hitting this is certainly a bug though...
*/
ndbout << "This timeout should never occur, execute(..)" << endl;
g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
"response from NDB data nodes. This should NEVER "
"occur. You have likely hit a NDB Bug. Please "
"file a bug.");
DBUG_PRINT("error",("This timeout should never occure, execute()"));
g_eventLogger.error("Forcibly trying to rollback txn (%p"
") to try to clean up data node resources.",
this);
executeNoBlobs(NdbTransaction::Rollback);
theError.code = 4012;
setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
theError.status= NdbError::PermanentError;
theError.classification= NdbError::TimeoutExpired;
setOperationErrorCodeAbort(4012); // ndbd timeout
DBUG_RETURN(-1);
}//if
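After this change a send/poll timeout leaves the transaction carrying error 4012, classified as PermanentError/TimeoutExpired, once the forced rollback attempt has been made. The sketch below shows one way an NDB API client might react to that error through the public NdbApi calls (execute, getNdbError, closeTransaction); it assumes an already prepared NdbTransaction and a running cluster, and the retry policy is purely illustrative.

```cpp
// Hedged client-side sketch: treat error 4012 as "retry from scratch".
#include <NdbApi.hpp>

bool execute_with_timeout_check(Ndb *ndb, NdbTransaction *trans)
{
  if (trans->execute(NdbTransaction::Commit) == 0)
    return true;                                  // committed normally

  const NdbError &err = trans->getNdbError();
  if (err.code == 4012)
  {
    // Data nodes did not answer in time; the API has already tried to roll
    // the transaction back, so release it and let the caller start over.
    ndb->closeTransaction(trans);
    return false;
  }
  ndb->closeTransaction(trans);                   // any other error: give up too
  return false;
}
```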
......@@ -522,6 +537,11 @@ NdbTransaction::executeAsynchPrepare(NdbTransaction::ExecType aTypeOfExec,
*/
if (theError.code != 0)
DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
/**
* for timeout (4012) we want sendROLLBACK to behave differently.
* Else, normal behaviour of reset errcode
*/
if (theError.code != 4012)
theError.code = 0;
NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
......@@ -843,6 +863,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
tSignal.setData(theTCConPtr, 1);
tSignal.setData(tTransId1, 2);
tSignal.setData(tTransId2, 3);
if(theError.code == 4012)
{
g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
tSignal.setLength(tSignal.getLength() + 1); // + flags
tSignal.setData(0x1, 4); // potentially bad data
}
tReturnCode = tp->sendSignal(&tSignal,theDBnode);
if (tReturnCode != -1) {
theSendStatus = sendTC_ROLLBACK;
......
......@@ -189,6 +189,7 @@ ErrorBundle ErrorCodes[] = {
{ 4032, DMEC, TR, "Out of Send Buffer space in NDB API" },
{ 1501, DMEC, TR, "Out of undo space" },
{ 288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
{ 289, DMEC, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
/**
* InsufficientSpace
......
......@@ -1298,6 +1298,102 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
return res;
}
int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
{
int result= NDBT_OK;
Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary * dict = pNdb->getDictionary();
const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
HugoOperations ops(*ctx->getTab(), idx);
g_err << "Using INDEX: " << pkIdxName << endl;
NdbRestarter restarter;
int loops = ctx->getNumLoops();
const int rows = ctx->getNumRecords();
const int batchsize = ctx->getProperty("BatchSize", 1);
for(int bs=1; bs < loops; bs++)
{
int c= 0;
while (c++ < loops)
{
g_err << "BS " << bs << " LOOP #" << c << endl;
g_err << "inserting error on op#" << c << endl;
CHECK(ops.startTransaction(pNdb) == 0);
for(int i=1;i<=c;i++)
{
if(i==c)
{
if(restarter.insertErrorInAllNodes(inject_err)!=0)
{
g_err << "**** FAILED to insert error" << endl;
result= NDBT_FAILED;
break;
}
}
CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0);
if(i%bs==0 || i==c)
{
if(i<c)
{
if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=NDBT_OK)
{
g_err << "**** executeNoCommit should have succeeded" << endl;
result= NDBT_FAILED;
}
}
else
{
if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=289)
{
g_err << "**** executeNoCommit should have failed with 289"
<< endl;
result= NDBT_FAILED;
}
g_err << "NdbError.code= " <<
ops.getTransaction()->getNdbError().code << endl;
break;
}
}
}
CHECK(ops.closeTransaction(pNdb) == 0);
if(restarter.insertErrorInAllNodes(0) != 0)
{
g_err << "**** Failed to error insert(0)" << endl;
return NDBT_FAILED;
}
CHECK(ops.startTransaction(pNdb) == 0);
if (ops.indexReadRecords(pNdb, pkIdxName,0,0,rows) != 0){
g_err << "**** Index read failed" << endl;
return NDBT_FAILED;
}
CHECK(ops.closeTransaction(pNdb) == 0);
}
}
return result;
}
int
runBug28804(NDBT_Context* ctx, NDBT_Step* step)
{
return tcSaveINDX_test(ctx, step, 8039);
}
int
runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step)
{
return tcSaveINDX_test(ctx, step, 8051);
}
NDBT_TESTSUITE(testIndex);
TESTCASE("CreateAll",
"Test that we can create all various indexes on each table\n"
......@@ -1629,6 +1725,27 @@ TESTCASE("Bug25059",
STEP(runBug25059);
FINALIZER(createPkIndex_Drop);
}
TESTCASE("Bug28804",
"Test behaviour on out of TransactionBufferMemory for index lookup"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
INITIALIZER(runClearTable);
INITIALIZER(createPkIndex);
INITIALIZER(runLoadTable);
STEP(runBug28804);
FINALIZER(createPkIndex_Drop);
FINALIZER(runClearTable);
}
TESTCASE("Bug28804_ATTRINFO",
"Test behaviour on out of TransactionBufferMemory for index lookup"
" in saveINDXATTRINFO"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
INITIALIZER(runClearTable);
INITIALIZER(createPkIndex);
INITIALIZER(runLoadTable);
STEP(runBug28804_ATTRINFO);
FINALIZER(createPkIndex_Drop);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
......
......@@ -938,3 +938,10 @@ max-time: 120
cmd: testMgm
args: -n ApiMgmStructEventTimeout T1
max-time: 180
cmd: testIndex
args: -n Bug28804 T1 T3
max-time: 180
cmd: testIndex
args: -n Bug28804_ATTRINFO T1 T3