Commit a339be0f authored by unknown

Fixed a 32-bit issue, reworked the error logic for open tables, and redid the repair table code so that it uses the extended optimize table code.


sql/examples/ha_archive.cc:
  Fixed an issue with 32-bit systems giving warnings on bit shifts (a consequence of Jim's fix that changed the row count to ha_rows). The error logic for opening a table was reworked after studying a reported issue: get_share() now creates a share in all situations, and the repair table path has to work out the table's state itself or raise its own error. The read-only filesystem and permission-denied cases are now handled. The repair table code rebuilds the meta file through the new extended optimize table code, so it no longer does the work itself.
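
  As a side note, below is a minimal standalone sketch of the warning this change avoids. The STORE8 macro and rows32_t typedef are hypothetical stand-ins for the server's int8store() macro and for ha_rows on a 32-bit build, not the real definitions: because int8store() is a macro, its ">> 32" is evaluated on the type of the expression the caller passes in, so a 32-bit row count trips a "shift count >= width of type" warning unless it is widened first.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for ha_rows on a 32-bit build without big tables. */
typedef uint32_t rows32_t;

/* Hypothetical stand-in for the generic int8store() macro: being a macro,
   the ">> 32" happens on the type of the expression passed in. */
#define STORE8(T, A)                                      \
  do {                                                    \
    uint32_t lo_= (uint32_t) (A);                         \
    uint32_t hi_= (uint32_t) ((A) >> 32);                 \
    memcpy((T), &lo_, 4);                                 \
    memcpy((T) + 4, &hi_, 4);                             \
  } while (0)

int main(void)
{
  rows32_t rows= 123456;
  unsigned char buf[8];

  /* STORE8(buf, rows) would warn: (rows) >> 32 shifts a 32-bit value by 32
     bits.  Widening first, as the patch does with (ulonglong)rows, is clean,
     and the same cast matches the "%llu" format in the DBUG_PRINT calls. */
  STORE8(buf, (unsigned long long) rows);

  printf("rows stored: %llu\n", (unsigned long long) rows);
  printf("low byte: %u  high byte: %u\n", buf[0], buf[7]);
  return 0;
}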
parent 46f0327e
@@ -305,12 +305,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
   meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
   meta_buffer[1]= (uchar)ARCHIVE_VERSION;
-  int8store(meta_buffer + 2, rows);
+  int8store(meta_buffer + 2, (ulonglong)rows);
   int8store(meta_buffer + 10, check_point);
   *(meta_buffer + 18)= (uchar)dirty;
   DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
   DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
   DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
@@ -326,6 +326,9 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
 /*
   We create the shared memory space that we will use for the open table.
+  No matter what we try to get or create a share. This is so that a repair
+  table operation can occur.
+
   See ha_example.cc for a longer description.
 */
 ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
@@ -363,7 +366,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
     */
     VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
     if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
-      goto error;
+      share->crashed= TRUE;

    /*
      After we read, we set the file to dirty. When we close, we will do the
@@ -381,27 +384,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
       that is shared amoung all open tables.
     */
     if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-      goto error2;
-    if (my_hash_insert(&archive_open_tables, (byte*) share))
-      goto error3;
+      share->crashed= TRUE;
+    VOID(my_hash_insert(&archive_open_tables, (byte*) share));
     thr_lock_init(&share->lock);
   }
   share->use_count++;
   pthread_mutex_unlock(&archive_mutex);

   return share;
-
-error3:
-  /* We close, but ignore errors since we already have errors */
-  (void)gzclose(share->archive_write);
-error2:
-  my_close(share->meta_file,MYF(0));
-error:
-  pthread_mutex_unlock(&archive_mutex);
-  VOID(pthread_mutex_destroy(&share->mutex));
-  my_free((gptr) share, MYF(0));
-
-  return NULL;
 }
@@ -458,13 +448,14 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
   DBUG_ENTER("ha_archive::open");

   if (!(share= get_share(name, table)))
-    DBUG_RETURN(-1);
+    DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
   thr_lock_data_init(&share->lock,&lock,NULL);

   if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
   {
-    (void)free_share(share); //We void since we already have an error
-    DBUG_RETURN(errno ? errno : -1);
+    if (errno == EROFS || errno == EACCES)
+      DBUG_RETURN(my_errno= errno);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   }

   DBUG_RETURN(0);
@@ -803,68 +794,20 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
 /*
   This method repairs the meta file. It does this by walking the datafile and
-  rewriting the meta file.
+  rewriting the meta file. Currently it does this by calling optimize with
+  the extended flag.
 */
 int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
 {
-  int rc;
-  byte *buf;
-  ha_rows rows_recorded= 0;
-  gzFile rebuild_file;            // Archive file we are working with
-  File meta_file;                 // Meta file we use
-  char data_file_name[FN_REFLEN];
   DBUG_ENTER("ha_archive::repair");
+  check_opt->flags= T_EXTEND;
+  int rc= optimize(thd, check_opt);

-  /*
-    Open up the meta file to recreate it.
-  */
-  fn_format(data_file_name, share->table_name, "", ARZ,
-            MY_REPLACE_EXT|MY_UNPACK_FILENAME);
-  if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
-    DBUG_RETURN(errno ? errno : -1);
-
-  if ((rc= read_data_header(rebuild_file)))
-    goto error;
-
-  /*
-    We malloc up the buffer we will use for counting the rows.
-    I know, this malloc'ing memory but this should be a very
-    rare event.
-  */
-  if (!(buf= (byte*) my_malloc(table->s->rec_buff_length > sizeof(ulonglong) +1 ?
-                               table->s->rec_buff_length : sizeof(ulonglong) +1 ,
-                               MYF(MY_WME))))
-  {
-    rc= HA_ERR_CRASHED_ON_USAGE;
-    goto error;
-  }
-
-  while (!(rc= get_row(rebuild_file, buf)))
-    rows_recorded++;
-
-  /*
-    Only if we reach the end of the file do we assume we can rewrite.
-    At this point we reset rc to a non-message state.
-  */
-  if (rc == HA_ERR_END_OF_FILE)
-  {
-    fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
-    if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
-    {
-      rc= HA_ERR_CRASHED_ON_USAGE;
-      goto error;
-    }
-    (void)write_meta_file(meta_file, rows_recorded, TRUE);
-    my_close(meta_file,MYF(0));
-    rc= 0;
-  }
-
-  my_free((gptr) buf, MYF(0));
+  if (rc)
+    DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
+
   share->crashed= FALSE;
-
-error:
-  gzclose(rebuild_file);
-
-  DBUG_RETURN(rc);
+  DBUG_RETURN(0);
 }

 /*
@@ -925,8 +868,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
     insert it into the new archive file.
   */
   if (!rc)
+  {
+    share->rows_recorded= 0;
     while (!(rc= get_row(archive, buf)))
+    {
       real_write_row(buf, writer);
+      share->rows_recorded++;
+    }
+  }

   my_free(buf, MYF(0));
   if (rc && rc != HA_ERR_END_OF_FILE)
......