Commit d9c51c61 authored by jonas@perch.ndb.mysql.com

Merge perch.ndb.mysql.com:/home/jonas/src/mysql-4.1

into  perch.ndb.mysql.com:/home/jonas/src/mysql-4.1-ndb
parents 5dded174 62e9abf0
......@@ -28,6 +28,14 @@
class File_class
{
public:
/**
* Returns time for last contents modification of a file.
*
* @param aFileName a filename to check.
* @return the time for last contents modification of the file.
*/
static time_t mtime(const char* aFileName);
/**
* Returns true if the file exist.
*
......
......@@ -49,7 +49,7 @@ my_bool opt_core;
{ "ndb-connectstring", OPT_NDB_CONNECTSTRING, \
"Set connect string for connecting to ndb_mgmd. " \
"Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \
"Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \
"Overrides specifying entries in NDB_CONNECTSTRING and my.cnf", \
(gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "ndb-shm", OPT_NDB_SHM,\
......
......@@ -147,6 +147,7 @@ FileLogHandler::createNewFile()
bool rc = true;
int fileNo = 1;
char newName[PATH_MAX];
time_t newMtime, preMtime = 0;
do
{
......@@ -159,7 +160,15 @@ FileLogHandler::createNewFile()
}
BaseString::snprintf(newName, sizeof(newName),
"%s.%d", m_pLogFile->getName(), fileNo++);
newMtime = File_class::mtime(newName);
if (newMtime < preMtime)
{
break;
}
else
{
preMtime = newMtime;
}
} while (File_class::exists(newName));
m_pLogFile->close();
......
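The new mtime bookkeeping above changes which file createNewFile() rotates into: instead of always scanning to the first unused suffix, the loop also stops as soon as a rotated file's mtime is older than its predecessor's, so the oldest slot gets reused. Below is a stand-alone sketch of that selection rule, assuming plain POSIX stat(2); the helper name pick_rotation_name is made up for illustration and is not the shipped FileLogHandler code.

// Sketch only: walk "<base>.1", "<base>.2", ... and stop either at the first
// name that does not exist yet, or at the first file whose mtime is older
// than its predecessor's, i.e. the oldest rotated file, so it gets reused.
#include <string>
#include <sys/stat.h>
#include <time.h>

static std::string pick_rotation_name(const std::string& base)
{
  std::string name;
  time_t prev = 0;
  for (int fileNo = 1;; fileNo++) {
    name = base + "." + std::to_string(fileNo);
    struct stat st;
    if (stat(name.c_str(), &st) != 0)
      break;                      // suffix not used yet: rotate into it
    if (st.st_mtime < prev)
      break;                      // mtime dropped: this is the oldest slot
    prev = st.st_mtime;
  }
  return name;
}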
......@@ -24,6 +24,18 @@
//
// PUBLIC
//
time_t
File_class::mtime(const char* aFileName)
{
MY_STAT stmp;
time_t rc = 0;
if (my_stat(aFileName, &stmp, MYF(0)) != NULL) {
rc = stmp.st_mtime;
}
return rc;
}
bool
File_class::exists(const char* aFileName)
......
......@@ -2313,7 +2313,8 @@ void Dbdict::checkSchemaStatus(Signal* signal)
tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
// On NR get index from master because index state is not on file
const bool file = c_systemRestart || tablePtr.p->isTable();
const bool file = (* newEntry == * oldEntry) &&
(c_systemRestart || tablePtr.p->isTable());
restartCreateTab(signal, tableId, oldEntry, file);
return;
......
......@@ -1044,6 +1044,8 @@ private:
void removeStoredReplica(FragmentstorePtr regFragptr,
ReplicaRecordPtr replicaPtr);
void searchStoredReplicas(FragmentstorePtr regFragptr);
bool setup_create_replica(FragmentstorePtr, CreateReplicaRecord*,
ConstPtr<ReplicaRecord>);
void updateNodeInfo(FragmentstorePtr regFragptr);
//------------------------------------
......
......@@ -984,13 +984,6 @@ Dbtc::handleFailedApiNode(Signal* signal,
TloopCount += 64;
break;
case CS_CONNECTED:
/*********************************************************************/
// The api record is connected to failed node. We need to release the
// connection and set it in a disconnected state.
/*********************************************************************/
jam();
releaseApiCon(signal, apiConnectptr.i);
break;
case CS_REC_COMMITTING:
case CS_RECEIVING:
case CS_STARTED:
......
......@@ -27,7 +27,7 @@
verify delete
Arguments:
-f Location of Ndb.cfg file, default Ndb.cfg
-f Location of my.cnf file, default my.cnf
-t Number of threads to start, default 1
-o Number of operations per loop, default 500
-l Number of loops to run, default 1, 0=infinite
-a Number of attributes, default 25
......@@ -829,7 +829,7 @@ static int createTables(Ndb* pMyNdb)
static void printUsage()
{
ndbout << "Usage of flexScan:" << endl;
ndbout << "-f <path> Location of Ndb.cfg file, default: Ndb.cfg" << endl;
ndbout << "-f <path> Location of my.cnf file, default: my.cnf" << endl;
ndbout << "-t <int> Number of threads to start, default 1" << endl;
ndbout << "-o <int> Number of operations per loop, default 500" << endl;
ndbout << "-l <int> Number of loops to run, default 1, 0=infinite" << endl;
......
......@@ -64,7 +64,7 @@ static struct my_option my_long_options[] =
{ "ndb-connectstring", 256,
"Set connect string for connecting to ndb_mgmd. "
"Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". "
"Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg",
"Overrides specifying entries in NDB_CONNECTSTRING and my.cnf",
(gptr*) &g_connectstring, (gptr*) &g_connectstring,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "nodes", 256, "Print nodes",
......
......@@ -57,7 +57,7 @@ if(@ARGV < 3 || $ARGV[0] eq '--usage' || $ARGV[0] eq '--help')
$template->param(dsn => $dsn);
}
my @releases = ({rel=>'4.1'},{rel=>'5.0'},{rel=>'5.1'});
my @releases = ({rel=>'4.1'},{rel=>'5.0'},{rel=>'5.1'}); #,{rel=>'5.1-dd'});
$template->param(releases => \@releases);
my $tables = $dbh->selectall_arrayref("show tables");
......@@ -81,25 +81,29 @@ sub align {
return @aligned;
}
foreach(@{$tables})
{
my $table= @{$_}[0];
sub do_table {
my $table= shift;
my $info= shift;
my %indexes= %{$_[0]};
my @count= @{$_[1]};
my @columns;
my $info= $dbh->selectall_hashref('describe `'.$table.'`',"Field");
my @count = $dbh->selectrow_array('select count(*) from `'.$table.'`');
my %columnsize; # used for index calculations
# We now work out the DataMemory usage
# sizes for 4.1, 5.0, 5.1
my @totalsize= (0,0,0);
# sizes for 4.1, 5.0, 5.1 and 5.1-dd
my @totalsize= (0,0,0,0);
@totalsize= @totalsize[0..$#releases]; # limit to releases we're outputting
my $nrvarsize= 0;
foreach(keys %$info)
{
my @realsize = (0,0,0);
my @realsize = (0,0,0,0);
my @varsize = (0,0,0,0);
my $type;
my $size;
my $name= $_;
my $is_varsize= 0;
if($$info{$_}{Type} =~ /^(.*?)\((\d+)\)/)
{
......@@ -112,54 +116,86 @@ foreach(@{$tables})
}
if($type =~ /tinyint/)
{@realsize=(1,1,1)}
{@realsize=(1,1,1,1)}
elsif($type =~ /smallint/)
{@realsize=(2,2,2)}
{@realsize=(2,2,2,2)}
elsif($type =~ /mediumint/)
{@realsize=(3,3,3)}
{@realsize=(3,3,3,3)}
elsif($type =~ /bigint/)
{@realsize=(8,8,8)}
{@realsize=(8,8,8,8)}
elsif($type =~ /int/)
{@realsize=(4,4,4)}
{@realsize=(4,4,4,4)}
elsif($type =~ /float/)
{
if($size<=24)
{@realsize=(4,4,4)}
{@realsize=(4,4,4,4)}
else
{@realsize=(8,8,8)}
{@realsize=(8,8,8,8)}
}
elsif($type =~ /double/ || $type =~ /real/)
{@realsize=(8,8,8)}
{@realsize=(8,8,8,8)}
elsif($type =~ /bit/)
{
my $a=($size+7)/8;
@realsize = ($a,$a,$a);
@realsize = ($a,$a,$a,$a);
}
elsif($type =~ /datetime/)
{@realsize=(8,8,8)}
{@realsize=(8,8,8,8)}
elsif($type =~ /timestamp/)
{@realsize=(4,4,4)}
{@realsize=(4,4,4,4)}
elsif($type =~ /date/ || $type =~ /time/)
{@realsize=(3,3,3)}
{@realsize=(3,3,3,3)}
elsif($type =~ /year/)
{@realsize=(1,1,1)}
{@realsize=(1,1,1,1)}
elsif($type =~ /varchar/ || $type =~ /varbinary/)
{
my $fixed= 1+$size;
my $fixed=$size+ceil($size/256);
my @dynamic=$dbh->selectrow_array("select avg(length(`"
.$name
."`)) from `".$table.'`');
$dynamic[0]=0 if !$dynamic[0];
@realsize= ($fixed,$fixed,ceil($dynamic[0]));
$dynamic[0]+=ceil($dynamic[0]/256); # size bit
$nrvarsize++;
$is_varsize= 1;
$varsize[3]= ceil($dynamic[0]);
@realsize= ($fixed,$fixed,ceil($dynamic[0]),$fixed);
}
elsif($type =~ /binary/ || $type =~ /char/)
{@realsize=($size,$size,$size)}
{@realsize=($size,$size,$size,$size)}
elsif($type =~ /text/ || $type =~ /blob/)
{
@realsize=(256,256,1);
$NoOfTables[$_]{val} += 1 foreach 0..$#releases; # blob uses table
} # FIXME check if 5.1 is correct
@realsize=(8+256,8+256,8+256,8+256);
my $blobhunk= 2000;
$blobhunk= 8000 if $type=~ /longblob/;
$blobhunk= 4000 if $type=~ /mediumblob/;
my @blobsize=$dbh->selectrow_array("select SUM(CEILING(".
"length(`$name`)/$blobhunk))".
"from `".$table."`");
$blobsize[0]=0 if !defined($blobsize[0]);
#$NoOfTables[$_]{val} += 1 foreach 0..$#releases; # blob uses table
do_table($table."\$BLOB_$name",
{'PK'=>{Type=>'int'},
'DIST'=>{Type=>'int'},
'PART'=>{Type=>'int'},
'DATA'=>{Type=>"binary($blobhunk)"}
},
{'PRIMARY' => {
'unique' => 1,
'comment' => '',
'columns' => [
'PK',
'DIST',
'PART',
],
'type' => 'HASH'
}
},
\@blobsize);
}
@realsize= @realsize[0..$#releases];
@realsize= align(4,@realsize);
$totalsize[$_]+=$realsize[$_] foreach 0..$#totalsize;
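The VARCHAR/VARBINARY branch above is where most of the new 5.1 varsize estimating happens: the fixed-width releases are charged the declared size plus one length byte per started 256 bytes, the 5.1 column is charged the observed average length plus its own length bytes, and everything is later rounded up to 4-byte words by align(). A small stand-alone illustration of that arithmetic follows, using made-up numbers (a VARCHAR(100) averaging 20 bytes); these are ndb_size.pl's estimates, not authoritative storage-engine figures.

// Illustrative arithmetic only (mirrors the Perl above, not an NDB API).
#include <cmath>
#include <cstdio>

// Round a byte count up to a 4-byte boundary, like the script's align(4, ...).
static int align4(double bytes) { return ((int)std::ceil(bytes) + 3) & ~3; }

int main()
{
  const int    n       = 100;   // VARCHAR(100)
  const double avg_len = 20.0;  // observed avg(length(col)) in the data

  // Fixed-width storage: n data bytes plus one length byte per started 256.
  int fixed = n + (int)std::ceil(n / 256.0);

  // 5.1 dynamic (varsize) storage: average length plus its length bytes.
  double dynamic = avg_len + std::ceil(avg_len / 256.0);

  std::printf("4.1/5.0 per row: %d bytes (aligned %d)\n", fixed, align4(fixed));
  std::printf("5.1 per row:     %d bytes (aligned %d)\n",
              (int)std::ceil(dynamic), align4(dynamic));
  return 0;
}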
......@@ -170,6 +206,7 @@ foreach(@{$tables})
push @columns, {
name=>$name,
type=>$type,
is_varsize=>$is_varsize,
size=>$size,
key=>$$info{$_}{Key},
datamemory=>\@realout,
......@@ -183,24 +220,10 @@ foreach(@{$tables})
# Firstly, we assemble some information about the indexes.
# We use SHOW INDEX instead of using INFORMATION_SCHEMA so
# we can still connect to pre-5.0 mysqlds.
my %indexes;
{
my $sth= $dbh->prepare("show index from `".$table.'`');
$sth->execute;
while(my $i = $sth->fetchrow_hashref)
{
$indexes{${%$i}{Key_name}}= {
type=>${%$i}{Index_type},
unique=>!${%$i}{Non_unique},
comment=>${%$i}{Comment},
} if !defined($indexes{${%$i}{Key_name}});
$indexes{${%$i}{Key_name}}{columns}[${%$i}{Seq_in_index}-1]=
${%$i}{Column_name};
}
}
if(!defined($indexes{PRIMARY})) {
my @usage= ({val=>8},{val=>8},{val=>8},{val=>8});
@usage= @usage[0..$#releases];
$indexes{PRIMARY}= {
type=>'BTREE',
unique=>1,
......@@ -212,20 +235,22 @@ foreach(@{$tables})
type=>'bigint',
size=>8,
key=>'PRI',
datamemory=>[{val=>8},{val=>8},{val=>8}],
datamemory=>\@usage,
};
$columnsize{'HIDDEN_NDB_PKEY'}= [8,8,8];
}
my @IndexDataMemory= ({val=>0},{val=>0},{val=>0});
my @RowIndexMemory= ({val=>0},{val=>0},{val=>0});
my @IndexDataMemory= ({val=>0},{val=>0},{val=>0},{val=>0});
my @RowIndexMemory= ({val=>0},{val=>0},{val=>0},{val=>0});
@IndexDataMemory= @IndexDataMemory[0..$#releases];
@RowIndexMemory= @RowIndexMemory[0..$#releases];
my @indexes;
foreach my $index (keys %indexes) {
my $im41= 25;
$im41+=$columnsize{$_}[0] foreach @{$indexes{$index}{columns}};
my @im = ({val=>$im41},{val=>25},{val=>25});
my @dm = ({val=>10},{val=>10},{val=>10});
my @im = ({val=>$im41},{val=>25},{val=>25}); #,{val=>25});
my @dm = ({val=>10},{val=>10},{val=>10}); #,{val=>10});
push @indexes, {
name=>$index,
type=>$indexes{$index}{type},
......@@ -233,13 +258,22 @@ foreach(@{$tables})
indexmemory=>\@im,
datamemory=>\@dm,
};
$IndexDataMemory[$_]{val}+=$dm[$_]{val} foreach 0..2;
$RowIndexMemory[$_]{val}+=$im[$_]{val} foreach 0..2;
$IndexDataMemory[$_]{val}+=$dm[$_]{val} foreach 0..$#releases;
$RowIndexMemory[$_]{val}+=$im[$_]{val} foreach 0..$#releases;
}
# total size + 16 bytes overhead
my @TotalDataMemory;
$TotalDataMemory[$_]{val}=$IndexDataMemory[$_]{val}+$totalsize[$_]+16 foreach 0..2;
my @RowOverhead = ({val=>16},{val=>16},{val=>16}); #,{val=>24});
# 5.1 has ptr to varsize page, and per-varsize overhead
my @nrvarsize_mem= ({val=>0},{val=>0},
{val=>8}); #,{val=>0});
{
my @a= align(4,$nrvarsize*2);
$nrvarsize_mem[2]{val}+=$a[0]+$nrvarsize*4;
}
$TotalDataMemory[$_]{val}=$IndexDataMemory[$_]{val}+$totalsize[$_]+$RowOverhead[$_]{val}+$nrvarsize_mem[$_]{val} foreach 0..$#releases;
my @RowDataMemory;
push @RowDataMemory,{val=>$_} foreach @totalsize;
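The per-row total assembled just above is column bytes plus per-index data memory plus the row overhead, with 5.1 additionally paying the varsize overhead: an 8-byte pointer to the varsize part, the 2-byte length entries aligned to 4 bytes, and 4 further bytes per varsize attribute, per the script's constants. A worked example with the same made-up column as before; again an estimate, not a guarantee.

// Worked example of the per-row DataMemory total the script assembles.
#include <cstdio>

int main()
{
  const int n_indexes = 1;   // PRIMARY only
  const int n_varsize = 1;   // one VARCHAR column

  const int row_overhead = 16;        // @RowOverhead per row
  const int index_dm     = 10 * n_indexes;

  // Column bytes differ per release (see the VARCHAR example above).
  const int cols_41 = 104;   // fixed-width estimate
  const int cols_51 = 24;    // dynamic (varsize) estimate

  // 5.1 varsize overhead: varsize-part pointer, aligned length entries,
  // plus 4 bytes per varsize attribute.
  const int varsize_51 = 8 + (((n_varsize * 2) + 3) & ~3) + 4 * n_varsize;

  std::printf("4.1/5.0 per-row estimate: %d bytes\n",
              cols_41 + index_dm + row_overhead);
  std::printf("5.1 per-row estimate:     %d bytes\n",
              cols_51 + index_dm + row_overhead + varsize_51);
  return 0;
}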
......@@ -260,12 +294,18 @@ foreach(@{$tables})
my @counts;
$counts[$_]{val}= $count foreach 0..$#releases;
my @nrvarsize_rel= ({val=>0},{val=>0},
{val=>$nrvarsize}); #,{val=>0});
push @table_size, {
table=>$table,
indexes=>\@indexes,
columns=>\@columns,
count=>\@counts,
RowOverhead=>\@RowOverhead,
RowDataMemory=>\@RowDataMemory,
nrvarsize=>\@nrvarsize_rel,
nrvarsize_mem=>\@nrvarsize_mem,
releases=>\@releases,
IndexDataMemory=>\@IndexDataMemory,
TotalDataMemory=>\@TotalDataMemory,
......@@ -283,6 +323,31 @@ foreach(@{$tables})
$NoOfIndexes[$_]{val} += @indexes foreach 0..$#releases;
}
foreach(@{$tables})
{
my $table= @{$_}[0];
my $info= $dbh->selectall_hashref('describe `'.$table.'`',"Field");
my @count = $dbh->selectrow_array('select count(*) from `'.$table.'`');
my %indexes;
{
my $sth= $dbh->prepare("show index from `".$table.'`');
$sth->execute;
while(my $i = $sth->fetchrow_hashref)
{
$indexes{${%$i}{Key_name}}= {
type=>${%$i}{Index_type},
unique=>!${%$i}{Non_unique},
comment=>${%$i}{Comment},
} if !defined($indexes{${%$i}{Key_name}});
$indexes{${%$i}{Key_name}}{columns}[${%$i}{Seq_in_index}-1]=
${%$i}{Column_name};
}
}
do_table($table, $info, \%indexes, \@count);
}
my @NoOfTriggers;
# for unique hash indexes
$NoOfTriggers[$_]{val} += $NoOfIndexes[$_]{val}*3 foreach 0..$#releases;
......
......@@ -15,6 +15,8 @@ td,th { border: 1px solid black }
<p>This information should be valid for MySQL 4.1 and 5.0. Since 5.1 is not a final release yet, the numbers should be used as a guide only.</p>
<p>5.1-dd is for tables stored on disk. The ndb_size.pl estimates are <b>experimental</b> and should not be trusted. Notably we don't take into account indexed columns being in DataMemory versus non-indexed on disk.</p>
<h2>Parameter Settings</h2>
<p><b>NOTE</b> the configuration parameters below do not take into account system tables and other requirements.</p>
<table>
......@@ -69,6 +71,7 @@ td,th { border: 1px solid black }
<tr>
<th>Column</th>
<th>Type</th>
<th>VARSIZE</th>
<th>Size</th>
<th>Key</th>
<TMPL_LOOP NAME=releases>
......@@ -79,6 +82,7 @@ td,th { border: 1px solid black }
<tr>
<td><TMPL_VAR NAME=name></td>
<td><TMPL_VAR NAME=type></td>
<td><TMPL_IF NAME=is_varsize>YES<TMPL_ELSE>&nbsp;</TMPL_IF></td>
<td><TMPL_VAR NAME=size></td>
<td><TMPL_VAR NAME=key></td>
<TMPL_LOOP NAME=datamemory>
......@@ -128,10 +132,22 @@ td,th { border: 1px solid black }
<th><TMPL_VAR NAME=rel></th>
</TMPL_LOOP>
</tr>
<tr>
<th>Nr Varsized Attributes</th>
<TMPL_LOOP NAME=nrvarsize>
<td><TMPL_VAR NAME=val></td>
</TMPL_LOOP>
</tr>
<tr>
<th>Row Overhead</th>
<TMPL_LOOP NAME=releases>
<td>16</td>
<TMPL_LOOP NAME=RowOverhead>
<td><TMPL_VAR NAME=val></td>
</TMPL_LOOP>
</tr>
<tr>
<th>Varsized Overhead</th>
<TMPL_LOOP NAME=nrvarsize_mem>
<td><TMPL_VAR NAME=val></td>
</TMPL_LOOP>
</tr>
<tr>
......
......@@ -31,6 +31,7 @@ public:
virtual void logEntry(const LogEntry &){}
virtual void endOfLogEntrys(){}
virtual bool finalize_table(const TableS &){return true;}
virtual bool has_temp_error() {return false;}
};
#endif
......@@ -139,6 +139,11 @@ BackupRestore::finalize_table(const TableS & table){
return ret;
}
bool
BackupRestore::has_temp_error(){
return m_temp_error;
}
bool
BackupRestore::table(const TableS & table){
if (!m_restore && !m_restore_meta)
......@@ -437,6 +442,7 @@ bool BackupRestore::errorHandler(restore_callback_t *cb)
case NdbError::TemporaryError:
err << "Temporary error: " << error << endl;
m_temp_error = true;
NdbSleep_MilliSleep(sleepTime);
return true;
// RETRY
......
......@@ -41,6 +41,7 @@ public:
m_parallelism = parallelism;
m_callback = 0;
m_free_callback = 0;
m_temp_error = false;
m_transactions = 0;
m_cache.m_old_table = 0;
}
......@@ -60,6 +61,7 @@ public:
virtual void logEntry(const LogEntry &);
virtual void endOfLogEntrys();
virtual bool finalize_table(const TableS &);
virtual bool has_temp_error();
void connectToMysql();
Ndb * m_ndb;
bool m_restore;
......@@ -72,6 +74,7 @@ public:
restore_callback_t *m_callback;
restore_callback_t *m_free_callback;
bool m_temp_error;
/**
* m_new_table_ids[X] = Y;
......
......@@ -411,6 +411,17 @@ main(int argc, char** argv)
}
}
}
for(Uint32 i= 0; i < g_consumers.size(); i++)
{
if (g_consumers[i]->has_temp_error())
{
clearConsumers();
ndbout_c("\nRestore successful, but encountered temporary error, "
"please look at configuration.");
return NDBT_ProgramExit(NDBT_TEMPORARY);
}
}
clearConsumers();
return NDBT_ProgramExit(NDBT_OK);
} // main
......
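Taken together, the restore changes add a simple flag-and-report pattern: BackupRestore sets m_temp_error whenever errorHandler() sees NdbError::TemporaryError, and main() downgrades the final status to NDBT_TEMPORARY if any consumer reports it. A minimal stand-alone sketch of the pattern, with made-up class names rather than the ndb_restore ones:

// Sketch only: each consumer remembers whether it ever hit a temporary
// error, and the driver downgrades the final exit status accordingly.
#include <cstdio>
#include <vector>

struct Consumer {
  virtual ~Consumer() {}
  virtual bool has_temp_error() const { return false; }  // default: none seen
};

struct RestoringConsumer : Consumer {
  bool m_temp_error = false;
  void on_error(bool temporary) {
    if (temporary)
      m_temp_error = true;   // remember it, but keep retrying the operation
  }
  bool has_temp_error() const override { return m_temp_error; }
};

int main()
{
  RestoringConsumer restore;
  std::vector<Consumer*> consumers{&restore};

  restore.on_error(/*temporary=*/true);   // e.g. a transient NDB error

  for (Consumer* c : consumers) {
    if (c->has_temp_error()) {
      std::printf("Restore successful, but encountered temporary error\n");
      return 2;   // stands in for NDBT_ProgramExit(NDBT_TEMPORARY)
    }
  }
  std::printf("Restore successful\n");
  return 0;
}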