Commit 9f45c9e3 authored by serg@serg.mylan

followup to handler cleanup

parent cfd78595
......@@ -1181,3 +1181,12 @@ a
A
a
drop table t1;
set autocommit=0;
create table t1(b varchar(30)) engine=bdb;
insert into t1 values ('one');
commit;
select b FROM t1 outer_table where
exists (select 'two' from t1 where 'two' = outer_table.b);
b
drop table t1;
set autocommit=1;
......@@ -822,3 +822,28 @@ alter table t1 modify a char(10) binary;
explain select a from t1;
select a from t1;
drop table t1;
#
# Bug #4000: problem with active cursor.
#
set autocommit=0;
create table t1(b varchar(30)) engine=bdb;
insert into t1 values ('one');
commit;
select b FROM t1 outer_table where
exists (select 'two' from t1 where 'two' = outer_table.b);
drop table t1;
set autocommit=1;
#
# Bug #4089: subselect and open cursor.
#
#create table t1(a int primary key, b varchar(30)) engine=bdb;
#insert into t1 values (1,'one'), (2,'two'), (3,'three'), (4,'four');
#create table t2 like t1;
#insert into t2 (a, b)
# select a, b from t1 where (a, b) in (select a, b from t1);
#select * from t2;
#drop table t1, t2;
......@@ -481,13 +481,13 @@ int ha_archive::update_row(const byte * old_data, byte * new_data)
{
DBUG_ENTER("ha_archive::update_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::delete_row(const byte * buf)
{
DBUG_ENTER("ha_archive::delete_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read(byte * buf, const byte * key,
......@@ -496,7 +496,7 @@ int ha_archive::index_read(byte * buf, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
......@@ -505,32 +505,32 @@ int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read_idx");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_next(byte * buf)
{
DBUG_ENTER("ha_archive::index_next");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_prev(byte * buf)
{
DBUG_ENTER("ha_archive::index_prev");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_first(byte * buf)
{
DBUG_ENTER("ha_archive::index_first");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_last(byte * buf)
{
DBUG_ENTER("ha_archive::index_last");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
......@@ -581,6 +581,6 @@ ha_rows ha_archive::records_in_range(int inx,
enum ha_rkey_function end_search_flag)
{
DBUG_ENTER("ha_archive::records_in_range ");
DBUG_RETURN(records); // HA_ERR_NOT_IMPLEMENTED
DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
}
#endif /* HAVE_ARCHIVE_DB */
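The ha_archive.cc hunks above uniformly replace HA_ERR_NOT_IMPLEMENTED with HA_ERR_WRONG_COMMAND in the row-modification and index stubs, i.e. the handler error the server expects for an operation an engine does not support. As a minimal sketch of that stub pattern (the ha_sketch handler name is hypothetical; the byte type and DBUG macros are assumed to come from the same headers the archive handler uses):

/*
  Hypothetical stub modelled on the hunks above: an operation the engine
  cannot service reports HA_ERR_WRONG_COMMAND back to the server.
*/
int ha_sketch::update_row(const byte *old_data, byte *new_data)
{
  DBUG_ENTER("ha_sketch::update_row");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);   // archive-style: no UPDATE support
}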
......@@ -22,7 +22,7 @@
/*
Please read ha_archive.cc first. If you are looking for more general
answers on how storage engines work, look at ha_example.cc and
answers on how storage engines work, look at ha_example.cc and
ha_example.h.
*/
......@@ -36,7 +36,7 @@ typedef struct st_archive_share {
bool dirty; /* Flag for if a flush should occur */
} ARCHIVE_SHARE;
/*
/*
Version for file format.
1 - Initial Version
*/
......@@ -61,7 +61,7 @@ public:
/* The size of the offset value we will use for position() */
ref_length = sizeof(z_off_t);
}
~ha_archive()
~ha_archive()
{
}
const char *table_type() const { return "ARCHIVE"; }
......@@ -69,21 +69,18 @@ public:
const char **bas_ext() const;
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_WRITE_DELAYED |
HA_NO_AUTO_INCREMENT);
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
HA_FILE_BASED);
}
ulong index_flags(uint inx) const
ulong index_flags(uint idx, uint part) const
{
return 0;
}
/*
This is just a default, there is no real limit as far as
/*
Have to put something here, there is no real limit as far as
archive is concerned.
*/
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return 0; }
uint max_key_parts() const { return 0; }
uint max_key_length() const { return 0; }
uint max_supported_record_length() const { return UINT_MAX; }
/*
Called in test_quick_select to determine if indexes should be used.
*/
......
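The ha_archive.h hunk above tracks the renamed capability interface from the handler cleanup: index_flags() gains a key-part argument, the max_*() limits become max_supported_*(), and table_flags() now advertises HA_FILE_BASED. A minimal sketch of how an index-less engine might declare the same members under the new names (ha_sketch is a hypothetical class used only for illustration):

class ha_sketch: public handler
{
public:
  ha_sketch(TABLE *table): handler(table) {}

  /* new two-argument form: capability flags per index and key part */
  ulong index_flags(uint idx, uint part) const { return 0; }

  /*
    renamed limits: report the engine's own maxima; MySQL combines
    them with its global limits automatically
  */
  uint max_supported_record_length() const { return UINT_MAX; }
  uint max_supported_keys() const          { return 0; }   /* no indexes */
  uint max_supported_key_parts() const     { return 0; }
  uint max_supported_key_length() const    { return 0; }
};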
......@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
/*
Please read ha_example.cc before reading this file.
Please keep in mind that the example storage engine implements all methods
that are required to be implemented. handler.h has a full list of methods
......@@ -48,55 +48,68 @@ public:
ha_example(TABLE *table): handler(table)
{
}
~ha_example()
~ha_example()
{
}
/* The name that will be used for display purposes */
const char *table_type() const { return "EXAMPLE"; }
/* The name of the index type that will be used for display */
const char *index_type(uint inx) { return "NONE"; }
const char *table_type() const { return "EXAMPLE"; }
/*
The name of the index type that will be used for display
don't implement this method unless you really have indexes
*/
const char *index_type(uint inx) { return "HASH"; }
const char **bas_ext() const;
/*
This is a list of flags that says what the storage engine
/*
This is a list of flags that says what the storage engine
implements. The current table flags are documented in
table_flags.
handler.h
*/
ulong table_flags() const
{
return 0;
}
/*
This is a list of flags that says how the storage engine
/*
This is a list of flags that says how the storage engine
implements indexes. The current index flags are documented in
handler.h. If you do not implement indexes, just return zero
handler.h. If you do not implement indexes, just return zero
here.
*/
ulong index_flags(uint inx) const
ulong index_flags(uint inx, uint part) const
{
return 0;
}
/*
/*
unireg.cc will call the following to make sure that the storage engine can
handle the data it is about to send.
Return *real* limits of your storage engine here. MySQL will do
min(your_limits, MySQL_limits) automatically.
There is no need to implement ..._key_... methods if you don't support
indexes.
*/
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return 0; }
uint max_key_parts() const { return 0; }
uint max_key_length() const { return 0; }
uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_supported_keys() const { return 0; }
uint max_supported_key_parts() const { return 0; }
uint max_supported_key_length() const { return 0; }
/*
Called in test_quick_select to determine if indexes should be used.
*/
virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
/*
/*
The next method will never be called if you do not implement indexes.
*/
virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
/*
/*
Everything below is a method that we implement in ha_example.cc.
Most of these methods are not obligatory; skip them and
MySQL will treat them as not implemented.
*/
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
......@@ -108,21 +121,32 @@ public:
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
/*
unlike index_init(), rnd_init() can be called two times
without rnd_end() in between (it only makes sense if scan=1).
Then the second call should prepare for the new table scan
(e.g. if rnd_init allocates the cursor, the second call should
position it to the start of the table; no need to deallocate
and allocate it again)
*/
int rnd_init(bool scan); //required
int rnd_end();
int rnd_next(byte *buf); //required
int rnd_pos(byte * buf, byte *pos); //required
void position(const byte *record); //required
void info(uint); //required
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
int external_lock(THD *thd, int lock_type); //required
int delete_all_rows(void);
ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key);
int delete_table(const char *from);
int rename_table(const char * from, const char * to);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info); //required
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
enum thr_lock_type lock_type); //required
};
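The new comment above rnd_init() spells out a contract: with scan=1, rnd_init() may be called a second time without an intervening rnd_end(), and that second call should simply reposition the scan to the start of the table rather than reallocate anything. A rough sketch of a handler honouring that (the cursor member and the cursor_open/cursor_rewind/cursor_close helpers are hypothetical placeholders, not part of this commit):

int ha_sketch::rnd_init(bool scan)
{
  DBUG_ENTER("ha_sketch::rnd_init");
  if (!cursor)
    cursor= cursor_open();          /* first call: allocate the cursor */
  else if (scan)
    cursor_rewind(cursor);          /* repeated call: rewind, do not reallocate */
  DBUG_RETURN(cursor ? 0 : 1);      /* non-zero reports failure to the server */
}

int ha_sketch::rnd_end()
{
  DBUG_ENTER("ha_sketch::rnd_end");
  if (cursor)
    cursor_close(cursor);           /* deallocation happens only here */
  cursor= 0;
  DBUG_RETURN(0);
}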
......@@ -1583,7 +1583,7 @@ int ha_berkeley::index_last(byte * buf)
int ha_berkeley::rnd_init(bool scan)
{
DBUG_ENTER("rnd_init");
DBUG_ASSERT(active_index==MAX_KEY);
//DBUG_ASSERT(active_index==MAX_KEY);
current_row.flags=DB_DBT_REALLOC;
DBUG_RETURN(index_init(primary_key));
}
......
......@@ -88,7 +88,7 @@ class ha_berkeley: public handler
public:
ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0),
int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
HA_NULL_IN_KEY | HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {}
......
......@@ -3872,12 +3872,8 @@ JOIN::join_free(bool full)
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
{
if (tab->table)
{
/* Don't free index if we are using read_record */
if (tab->table->file->inited==handler::RND)
if (tab->table && tab->table->file->inited == handler::RND)
tab->table->file->ha_rnd_end();
}
}
}
}
......