Commit 2ef4c0a2 authored by tomas@poseidon.mysql.com

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1

into  poseidon.mysql.com:/home/tomas/mysql-5.1-new-ndb
parents 30d927da bfb06141
......@@ -2479,8 +2479,16 @@ Dblqh::execREMOVE_MARKER_ORD(Signal* signal)
CommitAckMarkerPtr removedPtr;
m_commitAckMarkerHash.remove(removedPtr, key);
#if defined VM_TRACE || defined ERROR_INSERT
ndbrequire(removedPtr.i != RNIL);
m_commitAckMarkerPool.release(removedPtr);
#else
if (removedPtr.i != RNIL)
{
jam();
m_commitAckMarkerPool.release(removedPtr);
}
#endif
#ifdef MARKER_TRACE
ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2);
#endif
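
The hunk above makes the marker-removal path stricter in debug builds: under VM_TRACE or ERROR_INSERT a missing commit-ack marker trips ndbrequire(), while release builds simply skip the release() call when no marker is found. Below is a minimal, self-contained sketch of that debug-vs-release guard pattern; everything except the VM_TRACE/ERROR_INSERT macros (Entry, fake_remove, the RNIL value) is an illustrative stand-in, not the DBLQH code.

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint32_t RNIL = 0xFFFFFF00;     // "no record" sentinel

struct Entry { uint32_t i; };                // stand-in for CommitAckMarkerPtr

// Pretend hash removal: returns RNIL in .i when the key has no marker.
static Entry fake_remove(bool present) { return Entry{ present ? 7u : RNIL }; }

static void release(Entry e) { std::printf("released slot %u\n", e.i); }

void remove_marker(bool present)
{
  Entry removed = fake_remove(present);
#if defined VM_TRACE || defined ERROR_INSERT
  // Debug/error-insert builds: the marker must exist, fail hard otherwise.
  assert(removed.i != RNIL);
  release(removed);
#else
  // Release builds: tolerate a missing marker and only release when found.
  if (removed.i != RNIL)
    release(removed);
#endif
}

int main() { remove_marker(true); remove_marker(false); return 0; }
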
......@@ -3138,20 +3146,23 @@ void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
{
TcConnectionrec * const regTcPtr = tcConnectptr.p;
if (regTcPtr->operation != ZREAD) {
if (regTcPtr->opExec != 1) {
if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
;
} else {
jam();
if (regTcPtr->operation != ZDELETE)
{
if (regTcPtr->opExec != 1) {
if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
;
} else {
jam();
/* ------------------------------------------------------------------------- */
/* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */
/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
/* ------------------------------------------------------------------------- */
localAbortStateHandlerLab(signal);
return;
localAbortStateHandlerLab(signal);
return;
}//if
}//if
}//if
}//if
}
c_tup->receive_attrinfo(signal, regTcPtr->tupConnectrec, dataPtr, length);
}//Dblqh::lqhAttrinfoLab()
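
In this hunk the attrinfo handler gains an extra test: attribute data is buffered via saveTupattrbuf() only for non-read operations that are also not deletes, while the data is still forwarded to TUP in every case. A small sketch of that control flow, assuming this reading of the added ZDELETE check; the opcodes and helpers below are illustrative stand-ins, not the DBLQH/DBTUP interface.

#include <cstdint>
#include <cstdio>
#include <vector>

enum Op { ZREAD, ZDELETE, ZUPDATE };

static std::vector<uint32_t> saved;          // stand-in for the attrinfo buffer

static void save_attrinfo(const uint32_t* p, uint32_t len)
{ saved.insert(saved.end(), p, p + len); }

static void forward_to_tup(const uint32_t* p, uint32_t len)
{ std::printf("forwarded %u words to TUP (first=%u)\n", len, len ? p[0] : 0u); }

void attrinfo(Op op, const uint32_t* data, uint32_t len)
{
  // The old code buffered for every non-read; the patch also excludes deletes.
  if (op != ZREAD && op != ZDELETE)
    save_attrinfo(data, len);
  forward_to_tup(data, len);
}

int main()
{
  uint32_t words[3] = {1, 2, 3};
  attrinfo(ZUPDATE, words, 3);   // buffered and forwarded
  attrinfo(ZDELETE, words, 3);   // forwarded only
  return 0;
}
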
......@@ -3405,7 +3416,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
markerPtr.p->tcNodeId = tcNodeId;
CommitAckMarkerPtr tmp;
#ifdef VM_TRACE
#if defined VM_TRACE || defined ERROR_INSERT
#ifdef MARKER_TRACE
ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2);
#endif
......@@ -9629,7 +9640,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
active.add(scanptr);
if(scanptr.p->scanKeyinfoFlag){
jam();
#ifdef VM_TRACE
#if defined VM_TRACE || defined ERROR_INSERT
ScanRecordPtr tmp;
ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p));
#endif
......@@ -9753,7 +9764,7 @@ void Dblqh::finishScanrec(Signal* signal)
scans.add(restart);
if(restart.p->scanKeyinfoFlag){
jam();
#ifdef VM_TRACE
#if defined VM_TRACE || defined ERROR_INSERT
ScanRecordPtr tmp;
ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p));
#endif
......
......@@ -2824,6 +2824,12 @@ void Dbtc::execTCKEYREQ(Signal* signal)
tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref);
tmp.p->apiConnectPtr = TapiIndex;
tmp.p->noOfLqhs = 0;
#if defined VM_TRACE || defined ERROR_INSERT
{
CommitAckMarkerPtr check;
ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p));
}
#endif
m_commitAckMarkerHash.add(tmp);
}
}
......@@ -8114,6 +8120,13 @@ void Dbtc::initApiConnectFail(Signal* signal)
tmp.p->noOfLqhs = 1;
tmp.p->lqhNodeId[0] = tnodeid;
tmp.p->apiConnectPtr = apiConnectptr.i;
#if defined VM_TRACE || defined ERROR_INSERT
{
CommitAckMarkerPtr check;
ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p));
}
#endif
m_commitAckMarkerHash.add(tmp);
}
}//Dbtc::initApiConnectFail()
......@@ -8270,6 +8283,12 @@ void Dbtc::updateApiStateFail(Signal* signal)
tmp.p->noOfLqhs = 1;
tmp.p->lqhNodeId[0] = tnodeid;
tmp.p->apiConnectPtr = apiConnectptr.i;
#if defined VM_TRACE || defined ERROR_INSERT
{
CommitAckMarkerPtr check;
ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p));
}
#endif
m_commitAckMarkerHash.add(tmp);
} else {
jam();
......
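
The three Dbtc hunks above all add the same sanity check before m_commitAckMarkerHash.add(): in VM_TRACE/ERROR_INSERT builds the marker must not already be present in the hash. A self-contained sketch of that duplicate-check-before-insert pattern, using std::unordered_set as a stand-in for the kernel's intrusive hash (Marker and make_key are illustrative only):

#include <cassert>
#include <cstdint>
#include <unordered_set>

struct Marker { uint32_t transid1; uint32_t transid2; };

static uint64_t make_key(const Marker& m)
{ return (uint64_t(m.transid1) << 32) | m.transid2; }

static std::unordered_set<uint64_t> marker_hash;

void add_marker(const Marker& m)
{
#if defined VM_TRACE || defined ERROR_INSERT
  // Debug builds: adding the same transaction id twice indicates a
  // bookkeeping bug, so fail loudly before the hash is corrupted.
  assert(marker_hash.find(make_key(m)) == marker_hash.end());
#endif
  marker_hash.insert(make_key(m));
}

int main()
{
  add_marker(Marker{1, 2});
  add_marker(Marker{3, 4});
  return 0;
}
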
......@@ -1735,7 +1735,8 @@ private:
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr,
KeyReqStruct* req_struct);
KeyReqStruct* req_struct,
bool disk);
//------------------------------------------------------------------
//------------------------------------------------------------------
......
......@@ -814,7 +814,9 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
{
jam();
if (handleDeleteReq(signal, regOperPtr,
regFragPtr, regTabPtr, &req_struct) == -1) {
regFragPtr, regTabPtr,
&req_struct,
disk_page != RNIL) == -1) {
return;
}
/*
......@@ -1458,7 +1460,8 @@ int Dbtup::handleDeleteReq(Signal* signal,
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr,
KeyReqStruct *req_struct)
KeyReqStruct *req_struct,
bool disk)
{
// delete must set but not increment tupVersion
if (!regOperPtr->is_first_operation())
......@@ -1510,8 +1513,11 @@ int Dbtup::handleDeleteReq(Signal* signal,
{
return 0;
}
return handleReadReq(signal, regOperPtr, regTabPtr, req_struct);
if (setup_read(req_struct, regOperPtr, regFragPtr, regTabPtr, disk))
{
return handleReadReq(signal, regOperPtr, regTabPtr, req_struct);
}
error:
tupkeyErrorLab(signal);
......
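
Taken together, the Dbtup hunks let a delete also return the row's current values: handleDeleteReq() now receives a disk flag (whether a disk page is attached), and instead of calling handleReadReq() unconditionally it first runs setup_read() and only performs the read on success, falling through to the error label otherwise. A compact sketch of that delete+read flow; Row and the helpers below are illustrative stand-ins, not the DBTUP API.

#include <cstdio>
#include <string>

struct Row { int pk; std::string payload; bool on_disk; };

// Prepare the read; in this sketch it fails when disk data is requested
// but the row's disk part is not reachable.
static bool setup_read(const Row& row, bool disk)
{ return !disk || row.on_disk; }

static void do_read(const Row& row)
{ std::printf("read before delete: pk=%d payload=%s\n", row.pk, row.payload.c_str()); }

// Returns 0 on success, -1 on error, mirroring handleDeleteReq's convention.
int handle_delete(Row& row, bool want_read, bool disk)
{
  if (!want_read)
    return 0;                      // plain delete, nothing to return
  if (setup_read(row, disk))       // the read must be set up before it runs
  {
    do_read(row);
    return 0;
  }
  std::fprintf(stderr, "delete+read failed: cannot set up read\n");
  return -1;
}

int main()
{
  Row r{1, "hello", true};
  return handle_delete(r, /*want_read=*/true, /*disk=*/true) == 0 ? 0 : 1;
}
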
......@@ -3525,8 +3525,10 @@ void Qmgr::execCOMMIT_FAILREQ(Signal* signal)
nodePtr.p->phase = ZFAIL_CLOSING;
nodePtr.p->failState = WAITING_FOR_NDB_FAILCONF;
setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
setNodeInfo(nodePtr.i).m_version = 0;
c_clusterNodes.clear(nodePtr.i);
}//for
recompute_version_info(NodeInfo::DB);
/*----------------------------------------------------------------------*/
/* WE INFORM THE API'S WE HAVE CONNECTED ABOUT THE FAILED NODES. */
/*----------------------------------------------------------------------*/
......
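
The Qmgr hunk adds two pieces of bookkeeping when a node failure is committed: the failed node's stored software version is zeroed, and the aggregate version information for the DB node type is recomputed from the remaining nodes. A small illustrative sketch of that idea; NodeInfo, the nodes array and the exact semantics of recompute_version_info here are assumptions, not the Qmgr implementation.

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

struct NodeInfo { bool alive; uint32_t version; };

static std::array<NodeInfo, 4> nodes = {{ {true, 50122}, {true, 50123},
                                          {true, 50121}, {true, 50123} }};

// Recompute the minimum version among live nodes; 0 if none are alive.
static uint32_t recompute_version_info()
{
  uint32_t min_ver = 0;
  for (const NodeInfo& n : nodes)
    if (n.alive && n.version != 0)
      min_ver = (min_ver == 0) ? n.version : std::min(min_ver, n.version);
  return min_ver;
}

int main()
{
  nodes[2].alive = false;          // the node is marked as failed ...
  nodes[2].version = 0;            // ... and its version is cleared, as in the hunk
  std::printf("minimum live version: %u\n", recompute_version_info());
  return 0;
}
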
......@@ -1611,17 +1611,24 @@ NdbEventBuffer::insert_event(NdbEventOperationImpl* impl,
Uint32 &oid_ref)
{
NdbEventOperationImpl *dropped_ev_op = m_dropped_ev_op;
DBUG_PRINT("info", ("gci: %u", data.gci));
do
{
do
{
oid_ref = impl->m_oid;
insertDataL(impl, &data, ptr);
if (impl->m_node_bit_mask.get(0u))
{
oid_ref = impl->m_oid;
insertDataL(impl, &data, ptr);
}
NdbEventOperationImpl* blob_op = impl->theBlobOpList;
while (blob_op != NULL)
{
oid_ref = blob_op->m_oid;
insertDataL(blob_op, &data, ptr);
if (blob_op->m_node_bit_mask.get(0u))
{
oid_ref = blob_op->m_oid;
insertDataL(blob_op, &data, ptr);
}
blob_op = blob_op->m_next;
}
} while((impl = impl->m_next));
......@@ -1806,6 +1813,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
switch (operation)
{
case NdbDictionary::Event::_TE_NODE_FAILURE:
DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0);
op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri));
DBUG_PRINT("info",
("_TE_NODE_FAILURE: m_ref_count: %u for op: %p id: %u",
......@@ -1821,29 +1829,23 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
DBUG_RETURN_EVENT(0);
break;
case NdbDictionary::Event::_TE_CLUSTER_FAILURE:
if (op->m_node_bit_mask.get(0))
{
op->m_node_bit_mask.clear();
DBUG_ASSERT(op->m_ref_count > 0);
// remove kernel reference
// added in execute_nolock
op->m_ref_count--;
DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p",
op->m_ref_count, op));
if (op->theMainOp)
{
DBUG_ASSERT(op->m_ref_count == 0);
DBUG_ASSERT(op->theMainOp->m_ref_count > 0);
// remove blob reference in main op
// added in execute_no_lock
op->theMainOp->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
op->theMainOp->m_ref_count, op->theMainOp));
}
}
else
DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0);
op->m_node_bit_mask.clear();
DBUG_ASSERT(op->m_ref_count > 0);
// remove kernel reference
// added in execute_nolock
op->m_ref_count--;
DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p",
op->m_ref_count, op));
if (op->theMainOp)
{
DBUG_ASSERT(op->m_node_bit_mask.isclear() != 0);
DBUG_ASSERT(op->m_ref_count == 0);
DBUG_ASSERT(op->theMainOp->m_ref_count > 0);
// remove blob reference in main op
// added in execute_no_lock
op->theMainOp->m_ref_count--;
DBUG_PRINT("info", ("m_ref_count: %u for op: %p",
op->theMainOp->m_ref_count, op->theMainOp));
}
break;
case NdbDictionary::Event::_TE_STOP:
......
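
The NdbEventBuffer hunks gate event insertion on bit 0 of the operation's node bitmask (only operations whose subscription is still considered active receive data), and they turn the old if/else around _TE_CLUSTER_FAILURE into a hard assertion that the bit is still set before it is cleared and the kernel reference is dropped. A self-contained sketch of that guard; EventOp, deliver and the bitset width are illustrative stand-ins for NdbEventOperationImpl and insertDataL.

#include <bitset>
#include <cassert>
#include <cstdio>

struct EventOp { std::bitset<256> node_bit_mask; int ref_count; };

static void deliver(EventOp& op)
{ std::printf("event delivered, refs=%d\n", op.ref_count); }

void insert_event(EventOp& op)
{
  if (op.node_bit_mask.test(0))     // skip operations that are already torn down
    deliver(op);
}

void cluster_failure(EventOp& op)
{
  assert(op.node_bit_mask.test(0)); // a cluster failure must be seen at most once
  op.node_bit_mask.reset();         // clear all node bits
  assert(op.ref_count > 0);
  op.ref_count--;                   // drop the kernel reference added at execute
}

int main()
{
  EventOp op{std::bitset<256>(), 1};
  op.node_bit_mask.set(0);
  insert_event(op);                 // delivered
  cluster_failure(op);
  insert_event(op);                 // now skipped: bit 0 is cleared
  return 0;
}
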
......@@ -1272,6 +1272,52 @@ runBug25090(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int
runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
const NdbDictionary::Table* tab = ctx->getTab();
NDBT_ResultRow row(*ctx->getTab());
HugoTransactions tmp(*ctx->getTab());
int a;
int loops = ctx->getNumLoops();
const int rows = ctx->getNumRecords();
while (loops--)
{
NdbTransaction* pTrans = pNdb->startTransaction();
NdbOperation* pOp = pTrans->getNdbOperation(tab->getName());
pOp->deleteTuple();
for(a = 0; a<tab->getNoOfColumns(); a++)
{
if (tab->getColumn(a)->getPrimaryKey() == true)
{
if(tmp.equalForAttr(pOp, a, 0) != 0)
{
ERR(pTrans->getNdbError());
return NDBT_FAILED;
}
}
}
// Define attributes to read
for(a = 0; a<tab->getNoOfColumns(); a++)
{
if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
return NDBT_FAILED;
}
}
pTrans->execute(Commit);
pTrans->close();
}
return NDBT_OK;
}
NDBT_TESTSUITE(testBasic);
TESTCASE("PkInsert",
"Verify that we can insert and delete from this table using PK"
......@@ -1542,6 +1588,12 @@ TESTCASE("Bug25090",
"Verify what happens when we fill the db" ){
STEP(runBug25090);
}
TESTCASE("DeleteRead",
"Verify Delete+Read" ){
INITIALIZER(runLoadTable);
INITIALIZER(runDeleteRead);
FINALIZER(runClearTable2);
}
NDBT_TESTSUITE_END(testBasic);
#if 0
......
......@@ -79,6 +79,10 @@ max-time: 660
cmd: testBasic
args: -n UpdateAndRead
max-time: 500
cmd: testBasic
args: -n DeleteRead
max-time: 500
cmd: testBasic
args: -n PkReadAndLocker T6 D1 D2
......@@ -461,7 +465,7 @@ max-time: 500
cmd: testScan
args: -n Bug24447 T1
max-time: 500
max-time: 1000
cmd: testScan
args: -n ScanVariants
......