Commit aa84afb9 authored by monty@mysql.com

Merge bk-internal.mysql.com:/home/bk/mysql-4.1

into mysql.com:/home/my/mysql-4.1
parents fbf31c4a 4b9f462d
......@@ -45,6 +45,8 @@ dlenev@build.mysql.com
dlenev@jabberwock.localdomain
dlenev@mysql.com
ejonore@mc03.ndb.mysql.com
evgen@moonbone.(none)
evgen@moonbone.local
gbichot@quadita2.mysql.com
gbichot@quadxeon.mysql.com
georg@beethoven.local
......
......@@ -609,6 +609,7 @@ extern uint dirname_part(my_string to,const char *name);
extern uint dirname_length(const char *name);
#define base_name(A) (A+dirname_length(A))
extern int test_if_hard_path(const char *dir_name);
extern my_bool has_path(const char *name);
extern char *convert_dirname(char *to, const char *from, const char *from_end);
extern void to_unix_path(my_string name);
extern my_string fn_ext(const char *name);
......
......@@ -31,6 +31,7 @@
#define __GNU_LIBRARY__ /* Skip warnings in getopt.h */
#endif
#include <my_getopt.h>
#include <assert.h>
#if INT_MAX > 32767
#define BITS_SAVED 32
......@@ -1991,7 +1992,9 @@ static void write_bits (register ulong value, register uint bits)
{
reg3 uint byte_buff;
bits= (uint) -file_buffer.bits;
byte_buff=file_buffer.current_byte | (uint) (value >> bits);
DBUG_ASSERT(bits <= 8 * sizeof(value));
byte_buff= (file_buffer.current_byte |
((bits != 8 * sizeof(value)) ? (uint) (value >> bits) : 0));
#if BITS_SAVED == 32
*file_buffer.pos++= (byte) (byte_buff >> 24) ;
*file_buffer.pos++= (byte) (byte_buff >> 16) ;
......@@ -1999,7 +2002,9 @@ static void write_bits (register ulong value, register uint bits)
*file_buffer.pos++= (byte) (byte_buff >> 8) ;
*file_buffer.pos++= (byte) byte_buff;
value&=(1 << bits)-1;
DBUG_ASSERT(bits <= 8 * sizeof(ulong));
if (bits != 8 * sizeof(value))
value&= (((ulong) 1) << bits) - 1;
#if BITS_SAVED == 16
if (bits >= sizeof(uint))
{
......
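The write_bits() change above guards against a C undefined-behavior case: when file_buffer.bits is exactly -BITS_SAVED, the computed shift count equals the full width of value, and shifting a 32-bit ulong by 32 bits is undefined in C. A minimal standalone sketch of the guard pattern (hypothetical helper name, not part of myisampack):

#include <assert.h>

/* Shift right only when the count is strictly less than the width;
   a shift by the full width (e.g. x >> 32 on a 32-bit value) is
   undefined behavior in C, so treat it as "all bits shifted out". */
static unsigned long shift_right_safe(unsigned long value, unsigned int bits)
{
  assert(bits <= 8 * sizeof(value));
  return (bits != 8 * sizeof(value)) ? (value >> bits) : 0;
}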
......@@ -80,7 +80,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
continue; /* Skip comments */
}
if (!test_if_hard_path(buff))
if (!has_path(buff))
{
VOID(strmake(name_buff+dir_length,buff,
sizeof(name_buff)-1-dir_length));
......
......@@ -702,3 +702,12 @@ c
val-74
val-98
drop table t1,t2;
create table t1 (b int4 unsigned not null);
insert into t1 values(3000000000);
select * from t1;
b
3000000000
select min(b) from t1;
min(b)
3000000000
drop table t1;
......@@ -13,6 +13,26 @@ a
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
select * from t1;
a
2
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
a
2
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
ERROR HY000: Got error 241 'Invalid schema object version' from ndbcluster
select * from t1;
a
2
flush status;
select * from t1;
a
......@@ -20,7 +40,7 @@ a
update t1 set a=3 where a=2;
show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
Handler_discover 0
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
......
......@@ -515,3 +515,10 @@ explain select c from t2 where a = 2 and b = 'val-2' group by c;
select c from t2 where a = 2 and b = 'val-2' group by c;
drop table t1,t2;
# Test for BUG#9298 "Wrong handling of int4 unsigned columns in GROUP functions"
# (the actual problem was with protocol code, not GROUP BY)
create table t1 (b int4 unsigned not null);
insert into t1 values(3000000000);
select * from t1;
select min(b) from t1;
drop table t1;
......@@ -18,6 +18,30 @@ select * from t1;
select * from t2;
show status like 'handler_discover%';
# Check dropping and recreating table on same server
connect (con1,localhost,,,test);
connect (con2,localhost,,,test);
connection con1;
select * from t1;
connection con2;
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection con1;
select * from t1;
# Check dropping and recreating table on different server
connection server2;
show status like 'handler_discover%';
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection server1;
# Currently a retry is required remotely
--error 1296
select * from t1;
select * from t1;
# Connect to server2 and use the tables from there
connection server2;
flush status;
......
......@@ -192,3 +192,25 @@ int test_if_hard_path(register const char *dir_name)
return FALSE;
#endif
} /* test_if_hard_path */
/*
Test if a name contains an (absolute or relative) path.
SYNOPSIS
has_path()
name The name to test.
RETURN
TRUE name contains a path.
FALSE name does not contain a path.
*/
my_bool has_path(const char *name)
{
return test(strchr(name, FN_LIBCHAR))
#ifdef FN_DEVCHAR
|| test(strchr(name, FN_DEVCHAR))
#endif
;
}
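For context, the myrg_open() hunk earlier in this commit is the caller of this new function. A minimal sketch of the intended use, assuming the buffers from that hunk (name_buff holds the .MRG file's path, buff one table name read from it, dir_length the length of the .MRG directory prefix):

/* If the name from the .MRG file carries no path of its own,
   resolve it relative to the directory of the .MRG file. */
if (!has_path(buff))
  VOID(strmake(name_buff + dir_length, buff,
               sizeof(name_buff) - 1 - dir_length));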
......@@ -75,8 +75,11 @@ public:
Changed, ///< The object has been modified in memory
///< and has to be committed in NDB Kernel for
///< changes to take effect
Retrieved ///< The object exists and has been read
Retrieved, ///< The object exists and has been read
///< into main memory from NDB Kernel
Invalid ///< The object has been invalidated
///< and should not be used
};
/**
......
......@@ -1448,6 +1448,7 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
// If in local cache it must be in global
if (!cachedImpl)
abort();
cachedImpl->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(cachedImpl);
m_globalHash->unlock();
}
......@@ -1747,8 +1748,8 @@ NdbDictionaryImpl::dropTable(const char * name)
DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName));
m_localHash.drop(internalTableName);
m_globalHash->lock();
tab->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(tab);
m_globalHash->unlock();
DBUG_RETURN(dropTable(name));
......@@ -1793,9 +1794,10 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
if(ret == 0 || m_error.code == 709){
const char * internalTableName = impl.m_internalName.c_str();
m_localHash.drop(internalTableName);
m_localHash.drop(internalTableName);
m_globalHash->lock();
impl.m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(&impl);
m_globalHash->unlock();
......@@ -1889,6 +1891,7 @@ NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
m_localHash.drop(internalTableName);
m_globalHash->lock();
impl.m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(&impl);
m_globalHash->unlock();
return 0;
......@@ -2152,8 +2155,8 @@ NdbDictionaryImpl::dropIndex(const char * indexName,
m_ndb.internalizeTableName(indexName); // Index is also a table
m_localHash.drop(internalIndexName);
m_globalHash->lock();
idx->m_table->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(idx->m_table);
m_globalHash->unlock();
return dropIndex(indexName, tableName);
......@@ -2187,8 +2190,8 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
int ret = m_receiver.dropIndex(impl, *timpl);
if(ret == 0){
m_localHash.drop(internalIndexName);
m_globalHash->lock();
impl.m_table->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(impl.m_table);
m_globalHash->unlock();
}
......
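All of the drop paths above follow one pattern: mark the cached dictionary object Invalid before dropping it from the global hash, so that any other thread still holding a reference observes the Invalid status instead of silently using stale metadata. A condensed sketch of that pattern, with names as in the diff and error handling omitted:

m_localHash.drop(internalTableName);            // forget the thread-local entry
m_globalHash->lock();
impl.m_status = NdbDictionary::Object::Invalid; // visible to other holders
m_globalHash->drop(&impl);                      // remove from the shared cache
m_globalHash->unlock();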
......@@ -381,6 +381,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
char buff[FN_REFLEN],**table_names,**pos;
TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first;
THD *thd= current_thd;
uint dirlgt= dirname_length(name);
DBUG_ENTER("ha_myisammrg::create");
if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)*
......@@ -394,10 +395,29 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
tbl= find_temporary_table(thd, tables->db, tables->real_name);
if (!tbl)
{
uint length= my_snprintf(buff,FN_REFLEN,"%s%s/%s",
mysql_real_data_home,
/*
Construct the path to the MyISAM table. Try to meet two conditions:
1.) Allow inclusion of MyISAM tables from different databases, and
2.) allow for moving DATADIR around in the file system.
The first means that we need paths in the .MRG file. The second
means that we should not have absolute paths in the .MRG file.
The best we can do is to use 'mysql_data_home', which is '.'
in mysqld and may be an absolute path in an embedded server.
This means that it might not be possible to move the DATADIR of
an embedded server without changing the paths in the .MRG file.
*/
uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s", mysql_data_home,
tables->db, tables->real_name);
if (!(table_name= thd->strmake(buff, length)))
/*
If a MyISAM table is in the same directory as the MERGE table,
we use the table name without a path. This means that the
DATADIR can easily be moved even for an embedded server as long
as the MyISAM tables are from the same database as the MERGE table.
*/
if ((dirname_length(buff) == dirlgt) && ! memcmp(buff, name, dirlgt))
table_name= tables->real_name;
else
if (! (table_name= thd->strmake(buff, length)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
else
......
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -331,11 +331,28 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
# The mapped error code
*/
void ha_ndbcluster::invalidateDictionaryCache()
void ha_ndbcluster::invalidate_dictionary_cache(bool global)
{
NDBDICT *dict= get_ndb()->getDictionary();
DBUG_ENTER("invalidate_dictionary_cache");
DBUG_PRINT("info", ("invalidating %s", m_tabname));
if (global)
{
const NDBTAB *tab= dict->getTable(m_tabname);
if (!tab)
DBUG_VOID_RETURN;
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
// Global cache has already been invalidated
dict->removeCachedTable(m_tabname);
global= FALSE;
}
else
dict->invalidateTable(m_tabname);
}
else
dict->removeCachedTable(m_tabname);
table->version=0L; /* Free when thread is ready */
/* Invalidate indexes */
for (uint i= 0; i < table->keys; i++)
......@@ -347,18 +364,28 @@ void ha_ndbcluster::invalidateDictionaryCache()
switch(idx_type) {
case(PRIMARY_KEY_ORDERED_INDEX):
case(ORDERED_INDEX):
if (global)
dict->invalidateIndex(index->getName(), m_tabname);
else
dict->removeCachedIndex(index->getName(), m_tabname);
break;
case(UNIQUE_ORDERED_INDEX):
if (global)
dict->invalidateIndex(index->getName(), m_tabname);
else
dict->removeCachedIndex(index->getName(), m_tabname);
case(UNIQUE_INDEX):
if (global)
dict->invalidateIndex(unique_index->getName(), m_tabname);
else
dict->removeCachedIndex(unique_index->getName(), m_tabname);
break;
case(PRIMARY_KEY_INDEX):
case(UNDEFINED_INDEX):
break;
}
}
DBUG_VOID_RETURN;
}
int ha_ndbcluster::ndb_err(NdbConnection *trans)
......@@ -371,7 +398,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
switch (err.classification) {
case NdbError::SchemaError:
{
invalidateDictionaryCache();
invalidate_dictionary_cache(TRUE);
if (err.code==284)
{
......@@ -765,9 +792,16 @@ int ha_ndbcluster::get_metadata(const char *path)
const void *data, *pack_data;
uint length, pack_length;
if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
invalidate_dictionary_cache(FALSE);
if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
}
/*
Compare FrmData in NDB with frm file from disk.
*/
......@@ -786,7 +820,7 @@ int ha_ndbcluster::get_metadata(const char *path)
if (!invalidating_ndb_table)
{
DBUG_PRINT("info", ("Invalidating table"));
invalidateDictionaryCache();
invalidate_dictionary_cache(TRUE);
invalidating_ndb_table= TRUE;
}
else
......@@ -812,7 +846,7 @@ int ha_ndbcluster::get_metadata(const char *path)
if (error)
DBUG_RETURN(error);
m_tableVersion= tab->getObjectVersion();
m_table_version= tab->getObjectVersion();
m_table= (void *)tab;
m_table_info= NULL; // Set in external lock
......@@ -3226,15 +3260,25 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
void *tab_info;
if (!(tab= dict->getTable(m_tabname, &tab_info)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
if (m_table != (void *)tab || m_tableVersion != tab->getObjectVersion())
DBUG_PRINT("info", ("Table schema version: %d",
tab->getObjectVersion()));
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
invalidate_dictionary_cache(FALSE);
if (!(tab= dict->getTable(m_tabname, &tab_info)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d",
tab->getObjectVersion()));
}
if (m_table != (void *)tab || m_table_version < tab->getObjectVersion())
{
/*
The table has been altered, refresh the index list
*/
build_index_list(ndb, table, ILBP_OPEN);
m_table= (void *)tab;
m_tableVersion = tab->getObjectVersion();
m_table_version = tab->getObjectVersion();
}
m_table_info= tab_info;
}
......@@ -3260,7 +3304,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
thd->transaction.stmt.ndb_tid= 0;
}
}
m_table= NULL;
m_table_info= NULL;
/*
This is the place to make sure this handler instance
......@@ -3882,7 +3925,13 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
dict= ndb->getDictionary();
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
// Check if thread has stale local cache
if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
dict->removeCachedTable(m_tabname);
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
}
m_table= (void *)orig_tab;
// Change current database to that of target table
set_dbname(to);
......@@ -4006,7 +4055,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_active_trans(NULL),
m_active_cursor(NULL),
m_table(NULL),
m_tableVersion(-1),
m_table_version(-1),
m_table_info(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY |
......@@ -4250,7 +4299,6 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
DBUG_RETURN(1);
ERR_RETURN(err);
}
DBUG_PRINT("info", ("Found table %s", tab->getName()));
len= tab->getFrmLength();
......@@ -4327,6 +4375,7 @@ int ndbcluster_drop_database(const char *path)
uint i;
char *tabname;
List<char> drop_list;
int ret= 0;
ha_ndbcluster::set_dbname(path, (char *)&dbname);
DBUG_PRINT("enter", ("db: %s", dbname));
......@@ -4353,10 +4402,18 @@ int ndbcluster_drop_database(const char *path)
ndb->setDatabaseName(dbname);
List_iterator_fast<char> it(drop_list);
while ((tabname=it++))
{
if (dict->dropTable(tabname))
ERR_RETURN(dict->getNdbError());
DBUG_RETURN(0);
{
const NdbError err= dict->getNdbError();
if (err.code != 709)
{
ERR_PRINT(err);
ret= ndb_to_mysql_error(&err);
}
}
}
DBUG_RETURN(ret);
}
......
......@@ -203,7 +203,7 @@ class ha_ndbcluster: public handler
void print_results();
longlong get_auto_increment();
void invalidateDictionaryCache();
void invalidate_dictionary_cache(bool global);
int ndb_err(NdbConnection*);
bool uses_blob_value(bool all_fields);
......@@ -215,7 +215,7 @@ class ha_ndbcluster: public handler
NdbConnection *m_active_trans;
NdbResultSet *m_active_cursor;
void *m_table;
int m_tableVersion;
int m_table_version;
void *m_table_info;
char m_dbname[FN_HEADLEN];
//char m_schemaname[FN_HEADLEN];
......
......@@ -810,7 +810,7 @@ bool Protocol_simple::store_long(longlong from)
#endif
char buff[20];
return net_store_data((char*) buff,
(uint) (int10_to_str((int) from,buff, -10)-buff));
(uint) (int10_to_str((int) from, buff, (from < 0) ? -10 : 10) - buff));
}
......
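The store_long() change fixes the protocol side of BUG#9298: int10_to_str() interprets a negative radix (-10) as "convert as signed" and a positive one (10) as "convert as unsigned", so unconditionally passing -10 turned unsigned values above INT_MAX, such as the 3000000000 from the test case, into negative numbers on the wire. A self-contained illustration of the effect, using plain printf instead of int10_to_str:

#include <stdio.h>

int main(void)
{
  unsigned long from= 3000000000UL;       /* the int4 unsigned test value */
  /* Formatting the bit pattern as signed reproduces the bug
     (typically -1294967296 on a 32-bit two's-complement int). */
  printf("as signed:   %d\n", (int) from);
  printf("as unsigned: %u\n", (unsigned int) from);
  return 0;
}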
......@@ -6893,6 +6893,11 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
&join->tmp_table_param,
error, 0))
DBUG_RETURN(-1);
/*
If table->file->write_row() failed because of 'out of memory'
and the tmp table was successfully created, reset the error.
*/
error=0;
}
if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
{
......