Commit 5f183053 authored by unknown

Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.0

into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.0-ndb-merge


sql/ha_ndbcluster.cc:
  Auto merged
parents 5c836d24 cc0750ac
DROP TABLE IF EXISTS t1;
CREATE TABLE `test` (
`id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
`t` VARCHAR( 10 ) NOT NULL
) ENGINE = ndbcluster;
GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
DROP TABLE `test`.`test`;
drop user user1@localhost;
@@ -405,3 +405,22 @@ a b
1 1
10 10
drop table t2;
create table t1 (id int primary key) engine ndb;
insert into t1 values (1), (2), (3);
create table t2 (id int primary key) engine ndb;
insert into t2 select id from t1;
create trigger kaboom after delete on t1
for each row begin
delete from t2 where id=old.id;
end|
select * from t1 order by id;
id
1
2
3
delete from t1 where id in (1,2);
select * from t2 order by id;
id
3
drop trigger kaboom;
drop table t1;
-- source include/have_ndb.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE `test` (
`id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
`t` VARCHAR( 10 ) NOT NULL
) ENGINE = ndbcluster;
# Add user1@localhost with a specific password
# and connect as that user
GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
connect (user1,localhost,user1,pass,*NO-ONE*);
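# (This loop appears to target the check_ndb_connection() fix in
# ha_ndbcluster.cc below: info() could be reached on a connection
# that had not yet established its NDB connection.)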
# Run the query 100 times
disable_query_log;
disable_result_log;
let $i= 100;
while ($i)
{
select count(*) from information_schema.tables union all select count(*) from information_schema.tables union all select count(*) from information_schema.tables;
dec $i;
}
enable_query_log;
enable_result_log;
disconnect user1;
# Switch back to the default connection and cleanup
connection default;
DROP TABLE `test`.`test`;
drop user user1@localhost;
@@ -291,3 +291,25 @@ insert into t2 values (1,1), (10,10);
select * from t2 use index (ab) where a in(1,10) order by a;
drop table t2;
#bug#30337
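# An AFTER DELETE trigger on NDB table t1 removes the matching rows
# from NDB table t2; deleting from t1 must fire the trigger and leave
# only the untouched row in t2.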
create table t1 (id int primary key) engine ndb;
insert into t1 values (1), (2), (3);
create table t2 (id int primary key) engine ndb;
insert into t2 select id from t1;
delimiter |;
create trigger kaboom after delete on t1
for each row begin
delete from t2 where id=old.id;
end|
delimiter ;|
select * from t1 order by id;
delete from t1 where id in (1,2);
select * from t2 order by id;
drop trigger kaboom;
drop table t1;
@@ -6,7 +6,7 @@ Next DBTUP 4014
Next DBLQH 5043
Next DBDICT 6007
Next DBDIH 7183
Next DBTC 8039
Next DBTC 8052
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
@@ -296,6 +296,10 @@ ABORT OF TCKEYREQ
8038 : Simulate API disconnect just after SCAN_TAB_REQ
8039 : Simulate failure of TransactionBufferMemory allocation for OI lookup
8051 : Simulate failure of TransactionBufferMemory allocation for saveINDXATTRINFO
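These inserts are armed from NDBT tests through NdbRestarter; a minimal
sketch using the same calls the new testIndex cases below use:

NdbRestarter restarter;
restarter.insertErrorInAllNodes(8039); // fail TransactionBufferMemory alloc
// ... run unique-index reads; execute is expected to fail with error 289 ...
restarter.insertErrorInAllNodes(0);    // clear the error insert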
CMVMI
-----
......
@@ -1497,12 +1497,12 @@ private:
void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
TcConnectRecord * const regTcPtr);
// Trigger and index handling
bool saveINDXKEYINFO(Signal* signal,
int saveINDXKEYINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
bool saveINDXATTRINFO(Signal* signal,
int saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
......
@@ -1789,9 +1789,18 @@ start_failure:
}//switch
}
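/**
 * Compare two transaction ids (each a pair of Uint32 words) without
 * branching: XOR the corresponding words and OR the results; the
 * combined value is zero iff both words match.
 */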
static
inline
bool
compare_transid(Uint32* val0, Uint32* val1)
{
Uint32 tmp0 = val0[0] ^ val1[0];
Uint32 tmp1 = val0[1] ^ val1[1];
return (tmp0 | tmp1) == 0;
}
void Dbtc::execKEYINFO(Signal* signal)
{
UintR compare_transid1, compare_transid2;
jamEntry();
apiConnectptr.i = signal->theData[0];
tmaxData = 20;
@@ -1801,10 +1810,8 @@ void Dbtc::execKEYINFO(Signal* signal)
}//if
ptrAss(apiConnectptr, apiConnectRecord);
ttransid_ptr = 1;
compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false)
{
TCKEY_abort(signal, 19);
return;
}//if
@@ -2105,7 +2112,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
void Dbtc::execATTRINFO(Signal* signal)
{
UintR compare_transid1, compare_transid2;
UintR Tdata1 = signal->theData[0];
UintR Tlength = signal->length();
UintR TapiConnectFilesize = capiConnectFilesize;
@@ -2120,17 +2126,13 @@ void Dbtc::execATTRINFO(Signal* signal)
return;
}//if
UintR Tdata2 = signal->theData[1];
UintR Tdata3 = signal->theData[2];
ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
apiConnectptr.p = regApiPtr;
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
if (compare_transid(regApiPtr->transid, signal->theData+1) == false)
{
DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
<< " transid("<<hex<<Tdata2<<", "<<Tdata3);
<< " transid("<<hex<<signal->theData[1]<<", "<<signal->theData[2]);
TCKEY_abort(signal, 19);
return;
}//if
@@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
}
}//Dbtc::execTC_COMMITREQ()
/**
* TCROLLBACKREQ
*
* Format is:
*
* thedata[0] = apiconnectptr
* thedata[1] = transid[0]
* thedata[2] = transid[1]
* OPTIONAL thedata[3] = flags
*
* Flags:
* 0x1 = potentiallyBad data from API (try not to assert)
*/
void Dbtc::execTCROLLBACKREQ(Signal* signal)
{
bool potentiallyBad= false;
UintR compare_transid1, compare_transid2;
jamEntry();
if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
{
ndbout_c("Trying to roll back potentially bad txn\n");
potentiallyBad= true;
}
apiConnectptr.i = signal->theData[0];
if (apiConnectptr.i >= capiConnectFilesize) {
goto TC_ROLL_warning;
@@ -5547,11 +5570,13 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
TC_ROLL_warning:
jam();
if(likely(potentiallyBad==false))
warningHandlerLab(signal, __LINE__);
return;
TC_ROLL_system_error:
jam();
if(likely(potentiallyBad==false))
systemErrorLab(signal, __LINE__);
return;
}//Dbtc::execTCROLLBACKREQ()
@@ -11559,6 +11584,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
regApiPtr->apiConnectstate = CS_STARTED;
regApiPtr->transid[0] = tcIndxReq->transId1;
regApiPtr->transid[1] = tcIndxReq->transId2;
}//if
@@ -11599,20 +11625,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
indexOp->expectedAttrInfo = attrLength;
Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
if (saveINDXKEYINFO(signal,
int ret;
if ((ret = saveINDXKEYINFO(signal,
indexOp,
dataPtr,
includedIndexLength)) {
includedIndexLength)) == 0)
{
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
else if (ret == -1)
{
jam();
return;
}
dataPtr += includedIndexLength;
if (saveINDXATTRINFO(signal,
indexOp,
dataPtr,
includedAttrLength)) {
includedAttrLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11715,13 +11750,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
{
TCKEY_abort(signal, 19);
return;
}
if (regApiPtr->apiConnectstate == CS_ABORTING)
{
jam();
return;
}
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXKEYINFO(signal,
indexOp,
src,
keyInfoLength)) {
keyInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11748,17 +11795,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
{
TCKEY_abort(signal, 19);
return;
}
if (regApiPtr->apiConnectstate == CS_ABORTING)
{
jam();
return;
}
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXATTRINFO(signal,
indexOp,
src,
attrInfoLength)) {
attrInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
return;
}
}
@@ -11766,12 +11827,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
* Save signal INDXKEYINFO
* Return: 0 when all needed data has been received, 1 when more
* signals are expected, -1 on error (transaction aborted)
*/
bool Dbtc::saveINDXKEYINFO(Signal* signal,
int
Dbtc::saveINDXKEYINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len)
{
if (!indexOp->keyInfo.append(src, len)) {
if (ERROR_INSERTED(8039) || !indexOp->keyInfo.append(src, len)) {
jam();
// Failed to seize keyInfo, abort transaction
#ifdef VM_TRACE
@@ -11781,15 +11843,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
terrorCode = 289;
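// 289: out of transaction buffer memory in TC (see the new ndberror.c entry below)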
if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
return false;
return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
return true;
return 0;
}
return false;
return 1;
}
bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
@@ -11801,12 +11865,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
* Save signal INDXATTRINFO
* Return: 0 when all needed data has been received, 1 when more
* signals are expected, -1 on error (transaction aborted)
*/
bool Dbtc::saveINDXATTRINFO(Signal* signal,
int
Dbtc::saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len)
{
if (!indexOp->attrInfo.append(src, len)) {
if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
jam();
#ifdef VM_TRACE
ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
@@ -11814,15 +11879,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
terrorCode = 289;
if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
return false;
return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
return true;
return 0;
}
return false;
return 1;
}
bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
@@ -12006,6 +12073,9 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
releaseIndexOperation(regApiPtr, indexOp);
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
return;
@@ -12538,7 +12608,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
TcIndexOperationPtr& indexOpPtr)
{
return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
{
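/* A freshly seized index operation must come back clean; these
asserts catch operations that were released with data still attached. */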
ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
return true;
}
return false;
}
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
......
@@ -892,7 +892,12 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->committedRead() == -1 ||
/*
* This was committedRead() before. However lock on main
* table tuple does not fully protect blob parts since DBTUP
* commits each tuple separately.
*/
tOp->readTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
......
@@ -481,12 +481,27 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
while (1) {
int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
if (noOfComp == 0) {
/**
* This timeout situation can occur if NDB crashes.
/*
* Just for fun, this is only one of two places where
* we could hit this error... It's quite possible we
* hit it in Ndbif.cpp in Ndb::check_send_timeout()
*
* We behave rather similarly in both places.
* Hitting this is certainly a bug though...
*/
ndbout << "This timeout should never occur, execute(..)" << endl;
g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
"response from NDB data nodes. This should NEVER "
"occur. You have likely hit a NDB Bug. Please "
"file a bug.");
DBUG_PRINT("error",("This timeout should never occure, execute()"));
g_eventLogger.error("Forcibly trying to rollback txn (%p"
") to try to clean up data node resources.",
this);
executeNoBlobs(NdbTransaction::Rollback);
theError.code = 4012;
setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
theError.status= NdbError::PermanentError;
theError.classification= NdbError::TimeoutExpired;
setOperationErrorCodeAbort(4012); // ndbd timeout
DBUG_RETURN(-1);
}//if
@@ -550,6 +565,11 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
*/
if (theError.code != 0)
DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
/**
* for timeout (4012) we want sendROLLBACK to behave differently.
* Else, normal behaviour of reset errcode
*/
if (theError.code != 4012)
theError.code = 0;
NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
@@ -873,6 +893,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
tSignal.setData(theTCConPtr, 1);
tSignal.setData(tTransId1, 2);
tSignal.setData(tTransId2, 3);
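/**
 * After a 4012 timeout the API no longer knows the true state of the
 * transaction, so flag this rollback as potentially bad; Dbtc then
 * skips warningHandlerLab/systemErrorLab for it (see the flag
 * handling in execTCROLLBACKREQ above).
 */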
if(theError.code == 4012)
{
g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
tSignal.setLength(tSignal.getLength() + 1); // + flags
tSignal.setData(0x1, 4); // potentially bad data
}
tReturnCode = tp->sendSignal(&tSignal,theDBnode);
if (tReturnCode != -1) {
theSendStatus = sendTC_ROLLBACK;
......
@@ -173,6 +173,8 @@ ErrorBundle ErrorCodes[] = {
{ 4022, TR, "Out of Send Buffer space in NDB API" },
{ 4032, TR, "Out of Send Buffer space in NDB API" },
{ 288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
{ 289, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
/**
* InsufficientSpace
*/
......
@@ -1297,6 +1297,102 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
return res;
}
int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
{
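/**
 * For growing batch sizes, issue unique index reads and arm the given
 * error insert (8039 or 8051) on the last operation of the batch, so
 * that the TC-side save of INDXKEYINFO/INDXATTRINFO fails allocation;
 * execute is then expected to fail with error 289.
 */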
int result= NDBT_OK;
Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary * dict = pNdb->getDictionary();
const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
HugoOperations ops(*ctx->getTab(), idx);
g_err << "Using INDEX: " << pkIdxName << endl;
NdbRestarter restarter;
int loops = ctx->getNumLoops();
const int rows = ctx->getNumRecords();
const int batchsize = ctx->getProperty("BatchSize", 1);
for(int bs=1; bs < loops; bs++)
{
int c= 0;
while (c++ < loops)
{
g_err << "BS " << bs << " LOOP #" << c << endl;
g_err << "inserting error on op#" << c << endl;
CHECK(ops.startTransaction(pNdb) == 0);
for(int i=1;i<=c;i++)
{
if(i==c)
{
if(restarter.insertErrorInAllNodes(inject_err)!=0)
{
g_err << "**** FAILED to insert error" << endl;
result= NDBT_FAILED;
break;
}
}
CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0);
if(i%bs==0 || i==c)
{
if(i<c)
{
if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=NDBT_OK)
{
g_err << "**** executeNoCommit should have succeeded" << endl;
result= NDBT_FAILED;
}
}
else
{
if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=289)
{
g_err << "**** executeNoCommit should have failed with 289"
<< endl;
result= NDBT_FAILED;
}
g_err << "NdbError.code= " <<
ops.getTransaction()->getNdbError().code << endl;
break;
}
}
}
CHECK(ops.closeTransaction(pNdb) == 0);
if(restarter.insertErrorInAllNodes(0) != 0)
{
g_err << "**** Failed to error insert(0)" << endl;
return NDBT_FAILED;
}
CHECK(ops.startTransaction(pNdb) == 0);
if (ops.indexReadRecords(pNdb, pkIdxName,0,0,rows) != 0){
g_err << "**** Index read failed" << endl;
return NDBT_FAILED;
}
CHECK(ops.closeTransaction(pNdb) == 0);
}
}
return result;
}
int
runBug28804(NDBT_Context* ctx, NDBT_Step* step)
{
return tcSaveINDX_test(ctx, step, 8039);
}
int
runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step)
{
return tcSaveINDX_test(ctx, step, 8051);
}
NDBT_TESTSUITE(testIndex);
TESTCASE("CreateAll",
"Test that we can create all various indexes on each table\n"
@@ -1628,6 +1724,27 @@ TESTCASE("Bug25059",
STEP(runBug25059);
FINALIZER(createPkIndex_Drop);
}
TESTCASE("Bug28804",
"Test behaviour on out of TransactionBufferMemory for index lookup"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
INITIALIZER(runClearTable);
INITIALIZER(createPkIndex);
INITIALIZER(runLoadTable);
STEP(runBug28804);
FINALIZER(createPkIndex_Drop);
FINALIZER(runClearTable);
}
TESTCASE("Bug28804_ATTRINFO",
"Test behaviour on out of TransactionBufferMemory for index lookup"
" in saveINDXATTRINFO"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
INITIALIZER(runClearTable);
INITIALIZER(createPkIndex);
INITIALIZER(runLoadTable);
STEP(runBug28804_ATTRINFO);
FINALIZER(createPkIndex_Drop);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
......
@@ -779,3 +779,11 @@ cmd: DbAsyncGenerator
args: -time 60 -p 1 -proc 25
type: bench
max-time: 180
cmd: testIndex
args: -n Bug28804 T1 T3
max-time: 180
cmd: testIndex
args: -n Bug28804_ATTRINFO T1 T3
@@ -1758,9 +1758,15 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if (execute_no_commit_ie(this,trans,false) != 0)
{
int err= ndb_err(trans);
DBUG_RETURN(ndb_err(trans));
if(err==HA_ERR_KEY_NOT_FOUND)
table->status= STATUS_NOT_FOUND;
else
table->status= STATUS_GARBAGE;
DBUG_RETURN(err);
}
// The value have now been fetched from NDB
unpack_record(buf);
table->status= 0;
@@ -3310,6 +3316,8 @@ int ha_ndbcluster::info(uint flag)
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
if (m_table && table->found_next_number_field)
{
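// info() may run on a thread that has not touched NDB yet; make sure
// a valid connection exists before using get_ndb() below.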
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
Ndb *ndb= get_ndb();
Uint64 auto_increment_value64;
@@ -6462,7 +6470,8 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
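/**
 * Fall back to the default (non-batched) implementation when blobs
 * are fetched, when a unique index may be searched with NULLs, or
 * when deletes/updates cannot be batched for this table.
 */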
if (uses_blob_value(m_retrieve_all_fields) ||
(cur_index_type == UNIQUE_INDEX &&
has_null_in_unique_index(active_index) &&
null_value_index_search(ranges, ranges+range_count, buffer)))
null_value_index_search(ranges, ranges+range_count, buffer))
|| m_delete_cannot_batch || m_update_cannot_batch)
{
m_disable_multi_read= TRUE;
DBUG_RETURN(handler::read_multi_range_first(found_range_p,
......