Commit 6566af8c authored by unknown

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-4.1-ndb
into poseidon.(none):/home/tomas/mysql-4.1-ndb


ndb/src/kernel/vm/Configuration.cpp:
  Auto merged
parents 3212cc47 68750ccf
@@ -5,7 +5,7 @@ Next DBACC 3001
 Next DBTUP 4007
 Next DBLQH 5040
 Next DBDICT 6006
-Next DBDIH 7173
+Next DBDIH 7174
 Next DBTC 8035
 Next CMVMI 9000
 Next BACKUP 10022
@@ -387,6 +387,11 @@ Backup Stuff:
 5028: Crash when receiving LQHKEYREQ (in non-master)

+Failed Create Table:
+--------------------
+7173: Create table failed due to an insufficient number of fragment or
+      replica records.
+
 Drop Table/Index:
 -----------------
 4001: Crash on REL_TABMEMREQ in TUP
......
@@ -6425,6 +6425,10 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
   tabPtr.p->totalfragments = noFragments;
   ndbrequire(noReplicas == cnoReplicas); // Only allowed

+  if (ERROR_INSERTED(7173)) {
+    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+    return;
+  }
   if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
     jam();
     addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
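For readers unfamiliar with NDB's error-insert mechanism, the new 7173 check follows the kernel's usual fault-injection pattern: once a test client arms a code, the guarded branch takes the failure path deterministically, reusing the same refusal path as a genuine resource shortage. A minimal standalone sketch of the pattern; g_armedErrorCode, refuseCreateTable, and addTable are illustrative stand-ins, not the kernel's actual definitions:

#include <cstdio>

// Illustrative stand-in for the kernel's error-insert state; in NDB the
// armed code is set at runtime by the test harness, not hard-coded.
static unsigned g_armedErrorCode = 7173;
#define ERROR_INSERTED(code) (g_armedErrorCode == (code))

// Hypothetical refusal path standing in for addtabrefuseLab().
static void refuseCreateTable(int errorCode) {
  std::printf("create table refused, error %d\n", errorCode);
}

// Mirrors the shape of the new check: the injected failure takes the
// same refusal path as a genuine shortage of replica records.
static bool addTable(unsigned noReplicas, unsigned noFragments,
                     unsigned freeReplicaRecords) {
  if (ERROR_INSERTED(7173)) {   // forced failure for testing
    refuseCreateTable(7173);
    return false;
  }
  if (noReplicas * noFragments > freeReplicaRecords) {  // real resource check
    refuseCreateTable(7173);
    return false;
  }
  return true;
}

int main() {
  addTable(2, 8, 64);   // refused: code 7173 is armed above
  return 0;
}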
@@ -6736,6 +6740,7 @@ void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
 void Dbdih::releaseTable(TabRecordPtr tabPtr)
 {
   FragmentstorePtr fragPtr;
+  if (tabPtr.p->noOfFragChunks > 0) {
   for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
     jam();
     getFragstore(tabPtr.p, fragId, fragPtr);
@@ -6743,6 +6748,7 @@ void Dbdih::releaseTable(TabRecordPtr tabPtr)
     releaseReplicas(fragPtr.p->oldStoredReplicas);
   }//for
   releaseFragments(tabPtr);
+  }
   if (tabPtr.p->tabFile[0] != RNIL) {
     jam();
     releaseFile(tabPtr.p->tabFile[0]);
@@ -6875,9 +6881,6 @@ Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
   return nodeCount;
 }//Dbdih::extractNodeInfo()

-#define NO_OF_FRAGS_PER_CHUNK 16
-#define LOG_NO_OF_FRAGS_PER_CHUNK 4
-
 void
 Dbdih::getFragstore(TabRecord * tab,    //In parameter
                     Uint32 fragNo,      //In parameter
......
@@ -643,8 +643,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
   cfg.put(CFG_DIH_CONNECT,
           noOfOperations + noOfTransactions + 46);

+  Uint32 noFragPerTable= ((noOfDBNodes + NO_OF_FRAGS_PER_CHUNK - 1) >>
+                          LOG_NO_OF_FRAGS_PER_CHUNK) <<
+                          LOG_NO_OF_FRAGS_PER_CHUNK;
+
   cfg.put(CFG_DIH_FRAG_CONNECT,
-          NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfDBNodes);
+          noFragPerTable * noOfMetaTables);

   int temp;
   temp = noOfReplicas - 2;
......
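The noFragPerTable expression is a round-up to the next multiple of the chunk size: adding NO_OF_FRAGS_PER_CHUNK - 1, shifting right by LOG_NO_OF_FRAGS_PER_CHUNK, and shifting back left rounds noOfDBNodes up to a whole number of chunks. A standalone sketch of the arithmetic; roundUpToChunk and the sample values are illustrative:

#include <cstdio>

#define NO_OF_FRAGS_PER_CHUNK 8
#define LOG_NO_OF_FRAGS_PER_CHUNK 3

// Round n up to the next multiple of the chunk size (a power of two):
// adding chunk-1 before truncating to whole chunks rounds up, not down.
static unsigned roundUpToChunk(unsigned n) {
  return ((n + NO_OF_FRAGS_PER_CHUNK - 1) >> LOG_NO_OF_FRAGS_PER_CHUNK)
         << LOG_NO_OF_FRAGS_PER_CHUNK;
}

int main() {
  const unsigned samples[] = {1, 4, 8, 9, 12, 16, 17};
  for (unsigned nodes : samples)   // 1..8 -> 8, 9..16 -> 16, 17 -> 24
    std::printf("%2u nodes -> %2u fragment records per table\n",
                nodes, roundUpToChunk(nodes));
  return 0;
}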
@@ -150,6 +150,13 @@
 #define NO_OF_FRAG_PER_NODE 1
 #define MAX_FRAG_PER_NODE 8

+/**
+ * DIH allocates fragments in chunks for fast lookup of fragment records.
+ * These parameters define the chunk size and the log2 of the chunk size.
+ */
+#define NO_OF_FRAGS_PER_CHUNK 8
+#define LOG_NO_OF_FRAGS_PER_CHUNK 3
+
 /* ---------------------------------------------------------------- */
 // To avoid synching too big chunks at a time we synch after writing
 // a certain number of data/UNDO pages (e.g. 2 MBytes).
......
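Because the chunk size is a power of two, a fragment record can be located with one shift and one mask: the high bits of the fragment number select the chunk and the low bits index within it. A minimal sketch of this chunked lookup under a simplified layout; the Table and FragmentRec types below are stand-ins, not Dbdih's actual Fragmentstore structures:

#include <cstdio>
#include <vector>

#define NO_OF_FRAGS_PER_CHUNK 8
#define LOG_NO_OF_FRAGS_PER_CHUNK 3

// Simplified stand-in for a fragment record.
struct FragmentRec { unsigned fragId; };

// A table keeps an array of chunk pointers rather than one large
// contiguous fragment array; fragments are allocated a chunk at a time.
struct Table {
  std::vector<FragmentRec*> chunks;

  FragmentRec& getFragstore(unsigned fragNo) {
    const unsigned chunkNo  = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;   // which chunk
    const unsigned chunkIdx = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);  // slot in chunk
    return chunks[chunkNo][chunkIdx];
  }
};

int main() {
  Table t;
  for (unsigned c = 0; c < 2; c++) {          // allocate two chunks of 8
    FragmentRec* chunk = new FragmentRec[NO_OF_FRAGS_PER_CHUNK];
    for (unsigned i = 0; i < NO_OF_FRAGS_PER_CHUNK; i++)
      chunk[i].fragId = c * NO_OF_FRAGS_PER_CHUNK + i;
    t.chunks.push_back(chunk);
  }
  // Fragment 11 lives in chunk 1 (11 >> 3), slot 3 (11 & 7).
  std::printf("frag 11 -> id %u\n", t.getFragstore(11).fragId);
  for (FragmentRec* chunk : t.chunks) delete[] chunk;  // clean up the sketch
  return 0;
}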
@@ -1002,11 +1002,13 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
   return result;
 }

-int
+struct ErrorCodes { int error_id; bool crash; };
+ErrorCodes
 NF_codes[] = {
-  6003
-  ,6004
-  //,6005
+  {6003, true},
+  {6004, true},
+  //{6005, true},
+  {7173, false}
 };

 int
@@ -1042,7 +1044,9 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
   for(int i = 0; i<sz; i++){
     int rand = myRandom48(restarter.getNumDbNodes());
     int nodeId = restarter.getRandomNotMasterNodeId(rand);
-    int error = NF_codes[i];
+    struct ErrorCodes err_struct = NF_codes[i];
+    int error = err_struct.error_id;
+    bool crash = err_struct.crash;

     g_info << "NF1: node = " << nodeId << " error code = " << error << endl;
@@ -1057,6 +1061,7 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
     CHECK2(dict->createTable(* pTab) == 0,
            "failed to create table");

+    if (crash) {
     CHECK2(restarter.waitNodesNoStart(&nodeId, 1) == 0,
            "waitNodesNoStart failed");
@@ -1084,6 +1089,7 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
            "Failed to set LCP to min value");
       }
     }
+    }

 end:
   dict->dropTable(pTab->getName());
......
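The crash flag lets a single table of error codes drive two kinds of fault: codes such as 6003 and 6004 bring the node down, so the test must wait for the restart, while 7173 only makes the create-table call fail. A condensed sketch of that control flow; insertError, createTableSucceeds, and waitNodeRestart are stand-ins for the NdbRestarter and dictionary calls in the real test:

#include <cstdio>

struct ErrorCodes { int error_id; bool crash; };

static const ErrorCodes NF_codes[] = {
  {6003, true},
  {6004, true},
  {7173, false}
};

// Stand-ins for the NdbRestarter / NdbDictionary calls in the real test;
// here they only log what would happen.
static void insertError(int node, int code) {
  std::printf("arm error %d on node %d\n", code, node);
}
static bool createTableSucceeds() { return false; }  // must fail under fault
static void waitNodeRestart(int node) {
  std::printf("wait for node %d to restart\n", node);
}

int main() {
  const int nodeId = 2;   // arbitrary non-master node
  for (const ErrorCodes& e : NF_codes) {
    insertError(nodeId, e.error_id);
    if (createTableSucceeds())
      return 1;           // the armed error must refuse the create
    if (e.crash)          // only crashing codes take the node down
      waitNodeRestart(nodeId);
  }
  return 0;
}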