Commit 3be8b9a3 authored by unknown

Merge zim.(none):/home/brian/mysql/dep-5.1

into  zim.(none):/home/brian/mysql/remove-bdb-5.1

parents 14bd339e 336d1923
@@ -64,17 +64,26 @@ pk u o
 insert into t1 values (1,1,1);
 drop table t1;
 create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
-insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
+insert into t1 values (1,'one',1);
 begin;
 select * from t1 where x = 1 for update;
 x y z
 1 one 1
 begin;
-select * from t1 where x = 2 for update;
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+rollback;
+insert into t1 values (2,'two',2),(3,"three",3);
+begin;
+select * from t1 where x = 1 for update;
 x y z
-2 two 2
+1 one 1
 select * from t1 where x = 1 for update;
 ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+select * from t1 where x = 2 for update;
+x y z
+2 two 2
 rollback;
 commit;
 begin;
...
@@ -73,7 +73,7 @@ drop table t1;
 create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
-insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
+insert into t1 values (1,'one',1);
 # PK access
 connection con1;
@@ -82,11 +82,22 @@ select * from t1 where x = 1 for update;
 connection con2;
 begin;
-select * from t1 where x = 2 for update;
 --error 1205
 select * from t1 where x = 1 for update;
 rollback;
+connection con1;
+rollback;
+insert into t1 values (2,'two',2),(3,"three",3);
+begin;
+select * from t1 where x = 1 for update;
+connection con2;
+--error 1205
+select * from t1 where x = 1 for update;
+select * from t1 where x = 2 for update;
+rollback;
 connection con1;
 commit;
...
@@ -256,13 +256,15 @@ int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans)
 }
 inline
-int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans)
+int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
+                      bool force_release)
 {
 #ifdef NOT_USED
   int m_batch_execute= 0;
   if (m_batch_execute)
     return 0;
 #endif
+  h->release_completed_operations(trans, force_release);
   return h->m_ignore_no_key ?
     execute_no_commit_ignore_no_key(h,trans) :
     trans->execute(NdbTransaction::NoCommit,
@@ -297,13 +299,15 @@ int execute_commit(THD *thd, NdbTransaction *trans)
 }
 inline
-int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans)
+int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans,
+                         bool force_release)
 {
 #ifdef NOT_USED
   int m_batch_execute= 0;
   if (m_batch_execute)
     return 0;
 #endif
+  h->release_completed_operations(trans, force_release);
   return trans->execute(NdbTransaction::NoCommit,
                         NdbTransaction::AO_IgnoreError,
                         h->m_force_send);
@@ -328,6 +332,7 @@ Thd_ndb::Thd_ndb()
   all= NULL;
   stmt= NULL;
   error= 0;
+  query_state&= NDB_QUERY_NORMAL;
   options= 0;
   (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
                    (hash_get_key)thd_ndb_share_get_key, 0, 0);
@@ -1696,7 +1701,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
     ERR_RETURN(trans->getNdbError());
   }
-  if (execute_no_commit_ie(this,trans) != 0)
+  if (execute_no_commit_ie(this,trans,false) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -1761,7 +1766,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
     }
   }
-  if (execute_no_commit(this,trans) != 0)
+  if (execute_no_commit(this,trans,false) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -1914,7 +1919,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record)
   }
   last= trans->getLastDefinedOperation();
   if (first)
-    res= execute_no_commit_ie(this,trans);
+    res= execute_no_commit_ie(this,trans,false);
   else
   {
     // Table has no keys
@@ -1963,7 +1968,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
   if ((res= define_read_attrs(buf, op)))
     DBUG_RETURN(res);
-  if (execute_no_commit_ie(this,trans) != 0)
+  if (execute_no_commit_ie(this,trans,false) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -2011,7 +2016,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
   */
   if (m_ops_pending && m_blobs_pending)
   {
-    if (execute_no_commit(this,trans) != 0)
+    if (execute_no_commit(this,trans,false) != 0)
       DBUG_RETURN(ndb_err(trans));
     m_ops_pending= 0;
     m_blobs_pending= FALSE;
@@ -2043,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
     {
       if (m_transaction_on)
       {
-        if (execute_no_commit(this,trans) != 0)
+        if (execute_no_commit(this,trans,false) != 0)
           DBUG_RETURN(-1);
       }
       else
@@ -2370,7 +2375,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
     ERR_RETURN(trans->getNdbError());
   }
-  if (execute_no_commit(this,trans) != 0)
+  if (execute_no_commit(this,trans,false) != 0)
     DBUG_RETURN(ndb_err(trans));
   DBUG_RETURN(next_result(buf));
@@ -2440,7 +2445,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
   if ((res= define_read_attrs(buf, op)))
     DBUG_RETURN(res);
-  if (execute_no_commit(this,trans) != 0)
+  if (execute_no_commit(this,trans,false) != 0)
     DBUG_RETURN(ndb_err(trans));
   DBUG_PRINT("exit", ("Scan started successfully"));
   DBUG_RETURN(next_result(buf));
@@ -2603,7 +2608,7 @@ int ha_ndbcluster::write_row(byte *record)
     m_bulk_insert_not_flushed= FALSE;
     if (m_transaction_on)
     {
-      if (execute_no_commit(this,trans) != 0)
+      if (execute_no_commit(this,trans,false) != 0)
       {
         m_skip_auto_increment= TRUE;
         no_uncommitted_rows_execute_failure();
@@ -2840,7 +2845,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     op->setValue(no_fields, part_func_value);
   }
   // Execute update operation
-  if (!cursor && execute_no_commit(this,trans) != 0) {
+  if (!cursor && execute_no_commit(this,trans,false) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -2926,7 +2931,7 @@ int ha_ndbcluster::delete_row(const byte *record)
   }
   // Execute delete operation
-  if (execute_no_commit(this,trans) != 0) {
+  if (execute_no_commit(this,trans,false) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -3392,6 +3397,26 @@ int ha_ndbcluster::close_scan()
   NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor;
+  if (m_lock_tuple)
+  {
+    /*
+      Lock level m_lock.type is either TL_WRITE_ALLOW_WRITE
+      (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT
+      LOCK IN SHARE MODE) and the row was not explicitly unlocked
+      with an unlock_row() call
+    */
+    NdbOperation *op;
+    // Lock row
+    DBUG_PRINT("info", ("Keeping lock on scanned row"));
+    if (!(op= cursor->lockCurrentTuple()))
+    {
+      m_lock_tuple= false;
+      ERR_RETURN(trans->getNdbError());
+    }
+    m_ops_pending++;
+  }
+  m_lock_tuple= false;
   if (m_ops_pending)
   {
     /*
@@ -3399,7 +3424,7 @@ int ha_ndbcluster::close_scan()
       deleting/updating transaction before closing the scan
     */
     DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending));
-    if (execute_no_commit(this,trans) != 0) {
+    if (execute_no_commit(this,trans,false) != 0) {
      no_uncommitted_rows_execute_failure();
      DBUG_RETURN(ndb_err(trans));
    }
@@ -3793,7 +3818,7 @@ int ha_ndbcluster::end_bulk_insert()
     m_bulk_insert_not_flushed= FALSE;
     if (m_transaction_on)
     {
-      if (execute_no_commit(this, trans) != 0)
+      if (execute_no_commit(this, trans,false) != 0)
       {
         no_uncommitted_rows_execute_failure();
         my_errno= error= ndb_err(trans);
@@ -3968,6 +3993,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
           ERR_RETURN(ndb->getNdbError());
         thd_ndb->init_open_tables();
         thd_ndb->stmt= trans;
+        thd_ndb->query_state&= NDB_QUERY_NORMAL;
         trans_register_ha(thd, FALSE, &ndbcluster_hton);
       }
       else
@@ -3983,6 +4009,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
           ERR_RETURN(ndb->getNdbError());
         thd_ndb->init_open_tables();
         thd_ndb->all= trans;
+        thd_ndb->query_state&= NDB_QUERY_NORMAL;
         trans_register_ha(thd, TRUE, &ndbcluster_hton);
         /*
@@ -4139,6 +4166,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type)
     thd_ndb->stmt= trans;
     trans_register_ha(thd, FALSE, &ndbcluster_hton);
   }
+  thd_ndb->query_state&= NDB_QUERY_NORMAL;
   m_active_trans= trans;
   // Start of statement
@@ -7557,6 +7585,30 @@ int ha_ndbcluster::write_ndb_file(const char *name)
   DBUG_RETURN(error);
 }
+void
+ha_ndbcluster::release_completed_operations(NdbTransaction *trans,
+                                            bool force_release)
+{
+  if (trans->hasBlobOperation())
+  {
+    /* We are reading/writing BLOB fields;
+       releasing operation records is unsafe
+    */
+    return;
+  }
+  if (!force_release)
+  {
+    if (get_thd_ndb(current_thd)->query_state & NDB_QUERY_MULTI_READ_RANGE)
+    {
+      /* We are batching reads and have not consumed all fetched
+         rows yet; releasing operation records is unsafe
+      */
+      return;
+    }
+  }
+  trans->releaseCompletedOperations();
+}
 int
 ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
                                       KEY_MULTI_RANGE *ranges,
@@ -7572,6 +7624,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
   NDB_INDEX_TYPE index_type= get_index_type(active_index);
   ulong reclength= table_share->reclength;
   NdbOperation* op;
+  Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
   if (uses_blob_value())
   {
@@ -7585,7 +7638,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
                                           sorted,
                                           buffer));
   }
+  thd_ndb->query_state|= NDB_QUERY_MULTI_READ_RANGE;
   m_disable_multi_read= FALSE;
   /**
@@ -7757,7 +7810,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
   */
   m_current_multi_operation=
     lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation();
-  if (!(res= execute_no_commit_ie(this, m_active_trans)))
+  if (!(res= execute_no_commit_ie(this, m_active_trans, true)))
   {
     m_multi_range_defined= multi_range_curr;
     multi_range_curr= ranges;
...
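For readers following the handler changes above, here is a minimal standalone C++ sketch of the decision the new release_completed_operations() guard makes: completed operation records are reclaimed only when no blob operation is pending and either the caller forces the release or no batched multi-range read is still being consumed. The MockTransaction type and the free function below are illustrative stand-ins, not the real NdbTransaction or handler code.

#include <cstdio>

// Illustrative stand-in for the bits of NdbTransaction used here (not the real class).
struct MockTransaction {
  bool blob_operation_pending;   // mirrors what hasBlobOperation() reports
  int completed_operations;      // operation records that could be reclaimed

  void releaseCompletedOperations() { completed_operations = 0; }
};

enum QueryStateBits {            // same values as NDB_QUERY_STATE_BITS in the diff
  QUERY_NORMAL = 0,
  QUERY_MULTI_READ_RANGE = 1
};

// Sketch of the guard added by the patch: skip the release while records are still in use.
void release_completed_operations(MockTransaction &trans,
                                  unsigned query_state,
                                  bool force_release)
{
  if (trans.blob_operation_pending)
    return;                      // blob reads/writes still reference the records
  if (!force_release && (query_state & QUERY_MULTI_READ_RANGE))
    return;                      // batched ranges not yet consumed
  trans.releaseCompletedOperations();
}

int main()
{
  MockTransaction trans{false, 5};

  // Mid multi-range read: nothing is released unless the caller forces it.
  release_completed_operations(trans, QUERY_MULTI_READ_RANGE, false);
  std::printf("after lazy call: %d records\n", trans.completed_operations);   // 5

  // The multi-range path passes force_release == true once its batch is defined.
  release_completed_operations(trans, QUERY_MULTI_READ_RANGE, true);
  std::printf("after forced call: %d records\n", trans.completed_operations); // 0
  return 0;
}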
@@ -534,6 +534,11 @@ class Ndb_cond_traverse_context
   Ndb_rewrite_context *rewrite_stack;
 };
+typedef enum ndb_query_state_bits {
+  NDB_QUERY_NORMAL = 0,
+  NDB_QUERY_MULTI_READ_RANGE = 1
+} NDB_QUERY_STATE_BITS;
 /*
   Place holder for ha_ndbcluster thread specific data
 */
@@ -571,6 +576,7 @@ class Thd_ndb
   int error;
   uint32 options;
   List<NDB_SHARE> changed_tables;
+  uint query_state;
   HASH open_tables;
 };
@@ -849,8 +855,8 @@ private:
   friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
   friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*);
-  friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*);
-  friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*);
+  friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
+  friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool);
   NdbTransaction *m_active_trans;
   NdbScanOperation *m_active_cursor;
@@ -898,6 +904,8 @@ private:
   bool m_force_send;
   ha_rows m_autoincrement_prefetch;
   bool m_transaction_on;
+  void release_completed_operations(NdbTransaction*, bool);
   Ndb_cond_stack *m_cond_stack;
   bool m_disable_multi_read;
   byte *m_multi_range_result_ptr;
...
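As a companion to the header changes above, a small self-contained sketch of how the new per-thread query_state flag is meant to cycle: it is cleared back to NDB_QUERY_NORMAL when a statement or transaction starts and OR-ed with NDB_QUERY_MULTI_READ_RANGE when a batched range read begins. The MockThdNdb type and the two helper functions are illustrative stand-ins, not the real Thd_ndb or handler methods.

#include <cassert>

enum ndb_query_state_bits {      // same values as NDB_QUERY_STATE_BITS above
  NDB_QUERY_NORMAL = 0,
  NDB_QUERY_MULTI_READ_RANGE = 1
};

struct MockThdNdb {              // illustrative stand-in for Thd_ndb
  unsigned query_state = NDB_QUERY_NORMAL;
};

// Statement start: query_state &= NDB_QUERY_NORMAL acts as a reset
// only because NDB_QUERY_NORMAL is 0.
void start_statement(MockThdNdb &thd) { thd.query_state &= NDB_QUERY_NORMAL; }

// Batched range read: flag the thread so completed operations are not
// released while the batch is still being consumed.
void begin_multi_read_range(MockThdNdb &thd) { thd.query_state |= NDB_QUERY_MULTI_READ_RANGE; }

int main()
{
  MockThdNdb thd;
  begin_multi_read_range(thd);
  assert(thd.query_state & NDB_QUERY_MULTI_READ_RANGE);
  start_statement(thd);
  assert(thd.query_state == NDB_QUERY_NORMAL);
  return 0;
}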
@@ -140,6 +140,7 @@ class NdbTransaction
   friend class NdbIndexOperation;
   friend class NdbIndexScanOperation;
   friend class NdbBlob;
+  friend class ha_ndbcluster;
 #endif
 public:
@@ -791,6 +792,7 @@ private:
   // optim: any blobs
   bool theBlobFlag;
   Uint8 thePendingBlobOps;
+  inline bool hasBlobOperation() { return theBlobFlag; }
   static void sendTC_COMMIT_ACK(class TransporterFacade *, NdbApiSignal *,
                                 Uint32 transId1, Uint32 transId2,
...
@@ -218,12 +218,10 @@ client/server version.
 %{see_base}
 %prep
-# We unpack the source three times, for 'debug', 'max' and 'release' build.
+# We unpack the source two times, for 'debug' and 'release' build.
 %setup -T -a 0 -c -n mysql-%{mysql_version}
 mv mysql-%{mysql_version} mysql-debug-%{mysql_version}
 %setup -D -T -a 0 -n mysql-%{mysql_version}
-mv mysql-%{mysql_version} mysql-max-%{mysql_version}
-%setup -D -T -a 0 -n mysql-%{mysql_version}
 mv mysql-%{mysql_version} mysql-release-%{mysql_version}
 %build
@@ -332,35 +330,6 @@ fi
 (cd mysql-debug-%{mysql_version} ; \
   ./mysql-test-run.pl --comment=debug --skip-rpl --skip-ndbcluster --force ; \
   true)
-##############################################################################
-#
-#  Build the max binary
-#
-##############################################################################
-(cd mysql-max-%{mysql_version} &&
-CFLAGS="${MYSQL_BUILD_CFLAGS:-$RPM_OPT_FLAGS} -g" \
-CXXFLAGS="${MYSQL_BUILD_CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors -fno-exceptions -fno-rtti} -g" \
-BuildMySQL "--enable-shared \
-  --with-berkeley-db \
-  --with-ndbcluster \
-  --with-archive-storage-engine \
-  --with-csv-storage-engine \
-  --with-example-storage-engine \
-  --with-blackhole-storage-engine \
-  --with-federated-storage-engine \
-  --with-big-tables \
-  --with-comment=\"MySQL Community Server - Max (GPL)\"")
-# We might want to save the config log file
-if test -n "$MYSQL_MAXCONFLOG_DEST"
-then
-  cp -fp mysql-max-%{mysql_version}/config.log "$MYSQL_MAXCONFLOG_DEST"
-fi
-(cd mysql-max-%{mysql_version} ; \
-  ./mysql-test-run.pl --comment=max --skip-ndbcluster --do-test=bdb --force ; \
-  true)
 ##############################################################################
 #
@@ -417,13 +386,10 @@ install -d $RBR%{_sbindir}
 # the same here.
 mv $RBR/%{_libdir}/mysql/*.so* $RBR/%{_libdir}/
-# install "mysqld-debug" and "mysqld-max"
+# install "mysqld-debug"
 $MBD/libtool --mode=execute install -m 755 \
   $RPM_BUILD_DIR/mysql-%{mysql_version}/mysql-debug-%{mysql_version}/sql/mysqld \
   $RBR%{_sbindir}/mysqld-debug
-$MBD/libtool --mode=execute install -m 755 \
-  $RPM_BUILD_DIR/mysql-%{mysql_version}/mysql-max-%{mysql_version}/sql/mysqld \
-  $RBR%{_sbindir}/mysqld-max
 # install saved perror binary with NDB support (BUG#13740)
 install -m 755 $MBD/extra/perror $RBR%{_bindir}/perror
@@ -601,7 +567,6 @@ fi
 %attr(755, root, root) %{_sbindir}/mysqld
 %attr(755, root, root) %{_sbindir}/mysqld-debug
-%attr(755, root, root) %{_sbindir}/mysqld-max
 %attr(755, root, root) %{_sbindir}/mysqlmanager
 %attr(755, root, root) %{_sbindir}/rcmysql
@@ -707,6 +672,11 @@ fi
 # itself - note that they must be ordered by date (important when
 # merging BK trees)
 %changelog
+* Tue Aug 15 2006 Joerg Bruehe <joerg@mysql.com>
+- The "max" server is removed from packages, effective from 5.1.12-beta.
+  Delete all steps to build, package, or install it.
 * Mon Jul 10 2006 Joerg Bruehe <joerg@mysql.com>
 - Fix a typing error in the "make" target for the Perl script to run the tests.
...