Commit e0f32133 authored by tomas@whalegate.ndb.mysql.com

Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-telco-gca

into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb
parents c11806db 2606bb1c
......@@ -118,6 +118,8 @@
#define CFG_DB_O_DIRECT 168
#define CFG_DB_MAX_ALLOCATE 169
#define CFG_DB_SGA 198 /* super pool mem */
#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */
......
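Note: CFG_DB_MAX_ALLOCATE (169) is the new configuration id introduced by this change; it is read by Dbtup::execREAD_CONFIG_REQ and exposed as the MaxAllocate parameter in ConfigInfo.cpp further down. CFG_DB_DATA_MEM_2 (199) is, per its comment, only used in a special 5.1 build.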
......@@ -448,6 +448,41 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
                 filePtr.p->m_flags);
      }
    }
    ndbout_c("m_curr_disk_write_speed: %u m_words_written_this_period: %u m_overflow_disk_write: %u",
             m_curr_disk_write_speed, m_words_written_this_period, m_overflow_disk_write);
    ndbout_c("m_reset_delay_used: %u m_reset_disk_speed_time: %llu",
             m_reset_delay_used, (Uint64)m_reset_disk_speed_time);
    for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr))
    {
      ndbout_c("BackupRecord %u: BackupId: %u MasterRef: %x ClientRef: %x",
               ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
      ndbout_c(" State: %u", ptr.p->slaveState.getState());
      ndbout_c(" noOfByte: %llu noOfRecords: %llu",
               ptr.p->noOfBytes, ptr.p->noOfRecords);
      ndbout_c(" noOfLogBytes: %llu noOfLogRecords: %llu",
               ptr.p->noOfLogBytes, ptr.p->noOfLogRecords);
      ndbout_c(" errorCode: %u", ptr.p->errorCode);
      BackupFilePtr filePtr;
      for(ptr.p->files.first(filePtr); filePtr.i != RNIL;
          ptr.p->files.next(filePtr))
      {
        ndbout_c(" file %u: type: %u flags: H'%x tableId: %u fragmentId: %u",
                 filePtr.i, filePtr.p->fileType, filePtr.p->m_flags,
                 filePtr.p->tableId, filePtr.p->fragmentNo);
      }
      if (ptr.p->slaveState.getState() == SCANNING && ptr.p->dataFilePtr != RNIL)
      {
        c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
        OperationRecord & op = filePtr.p->operation;
        Uint32 *tmp = NULL;
        Uint32 sz = 0;
        bool eof = FALSE;
        bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof);
        ndbout_c("ready: %s eof: %s", ready ? "TRUE" : "FALSE", eof ? "TRUE" : "FALSE");
      }
    }
    return;
  }
  if(signal->theData[0] == 24){
    /**
......
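Note: the state dump added above is printed from Backup::execDUMP_STATE_ORD, which handles DUMP commands sent from the management client (ndb_mgm> ALL DUMP <code>); the dump code that selects this particular branch sits in the context elided above, so it is not repeated here.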
......@@ -2700,6 +2700,7 @@ private:
  ArrayPool<Page> c_page_pool;
  Uint32 cnoOfAllocatedPages;
  Uint32 m_max_allocate_pages;
  Tablerec *tablerec;
  Uint32 cnoOfTablerec;
......
......@@ -305,6 +305,12 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
  Uint32 noOfTriggers= 0;
  Uint32 tmp= 0;
  if (ndb_mgm_get_int_parameter(p, CFG_DB_MAX_ALLOCATE, &tmp))
    tmp = 32 * 1024 * 1024;
  m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE;
  tmp = 0;
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
  initPageRangeSize(tmp);
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec));
......
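Note: MaxAllocate is read above in bytes (defaulting to 32M when unset) and converted to a count of global pages by ceiling division. A minimal standalone sketch of that conversion, assuming a 32 KB GLOBAL_PAGE_SIZE (the real constant comes from the NDB kernel headers, not from this patch):

  #include <cassert>

  typedef unsigned int Uint32;

  /* Assumed page size, for illustration only. */
  static const Uint32 GLOBAL_PAGE_SIZE = 32 * 1024;

  /* Mirrors m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; */
  static Uint32 bytes_to_pages(Uint32 bytes)
  {
    return (bytes + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE;
  }

  int main()
  {
    assert(bytes_to_pages(32 * 1024 * 1024) == 1024); /* default MaxAllocate */
    assert(bytes_to_pages(1) == 1);                   /* remainders round up */
    return 0;
  }

Under that assumption the 32M default gives m_max_allocate_pages = 1024, the per-call cap applied in allocMoreFragPages below.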
......@@ -432,6 +432,11 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
  // We will grow by 18.75% plus two more additional pages to grow
  // a little bit quicker in the beginning.
  /* -----------------------------------------------------------------*/
  if (noAllocPages > m_max_allocate_pages)
  {
    noAllocPages = m_max_allocate_pages;
  }
  Uint32 allocated = allocFragPages(regFragPtr, noAllocPages);
  regFragPtr->noOfPagesToGrow += allocated;
}//Dbtup::allocMoreFragPages()
......
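Note: the new check above caps how much a fragment may grow in a single allocFragPages call. A rough illustration of the policy the comment describes (18.75%, i.e. 3/16, plus two pages, then clamped to the configured page budget); the exact expression for noAllocPages is in the context elided above, so this is a sketch rather than the kernel's code:

  typedef unsigned int Uint32;

  /* Illustrative growth step: ~18.75% of the current grow count plus 2 pages,
     clamped to the MaxAllocate page budget (m_max_allocate_pages). */
  static Uint32 next_grow_step(Uint32 noOfPagesToGrow, Uint32 max_allocate_pages)
  {
    Uint32 noAllocPages = (noOfPagesToGrow >> 3) + (noOfPagesToGrow >> 4) + 2;
    if (noAllocPages > max_allocate_pages)
      noAllocPages = max_allocate_pages;
    return noAllocPages;
  }

For example, with the 32M default (1024 pages at an assumed 32 KB page size), a fragment that would otherwise ask for several thousand pages is limited to 1024 pages per call.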
......@@ -1321,6 +1321,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"0",
STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_MAX_ALLOCATE,
"MaxAllocate",
DB_TOKEN,
"Maximum size of allocation to use when allocating memory for tables",
ConfigInfo::CI_USED,
false,
ConfigInfo::CI_INT,
"32M",
"1M",
"1G" },
{
CFG_DB_MEMREPORT_FREQUENCY,
"MemReportFrequency",
......
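Note: this registers MaxAllocate as a data node (DB_TOKEN) parameter backed by CFG_DB_MAX_ALLOCATE, with a 32M default and an allowed range of 1M to 1G. Operators can override it per cluster, e.g. MaxAllocate=512M under [ndbd default] in config.ini (assuming the usual cluster configuration layout); Dbtup then converts the byte value into the page cap shown in the execREAD_CONFIG_REQ hunk above.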