Commit 16ce188d authored by unknown

Bug#11824 - internal /tmp/*.{MYD,MYI} files remain, causing subsequent queries to fail

Very complex select statements can create temporary tables
that are too big to be represented as MyISAM tables.

This was not checked at table creation time, but only at
open time. The result was an attempt to delete the 
"impossible" table.

But if the server is built --with-raid, MyISAM tries to 
open the table before deleting the files. It needs to find 
out if the table uses raid support and how many raid 
chunks there are. This is done with an open "for repair",
which will almost always succeed.

But in this case we have an "impossible" table. The open
failed. Hence the files were not deleted. Also, the error
message was rather unspecific.

I turned an open error in this situation into the assumption
that the table has no raid support. Thus only the normal data
file is deleted. This may, however, leave existing raid chunks
behind.
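
For illustration, a minimal standalone sketch of this fallback
(hypothetical names, not the real MyISAM code; the actual change
is in the mi_delete_table.c hunk below):

  #include <stdio.h>

  /* Hypothetical stand-in for the metadata read during table open. */
  struct table_meta
  {
    int raid_type;                  /* 0 means: no raid configuration */
    int raid_chunks;
  };

  /* Simulates an open "for repair" that fails on an "impossible" table. */
  static int probe_meta(const char *name, struct table_meta *out)
  {
    (void) name;
    (void) out;
    return -1;                      /* open failed */
  }

  static void delete_table(const char *name)
  {
    struct table_meta meta= {0, 0};

    /* If even the repair-open fails, assume no raid support ... */
    if (probe_meta(name, &meta))
      meta.raid_type= 0;

    /* ... and fall back to deleting only the plain data file. */
    if (meta.raid_type)
      printf("delete %d raid chunks of %s\n", meta.raid_chunks, name);
    else
      printf("delete %s.MYD (raid chunks, if any, stay behind)\n", name);
  }

  int main(void)
  {
    delete_table("/tmp/#sql_example");
    return 0;
  }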

I also added a check in mi_create() to prevent the creation
of an "impossible" table. A more decriptive error message is
given in this case.
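
The limit behind this check comes from the on-disk format: the
index file header stores its total length in a 16-bit field (see
the mi_create.c note below), so a larger computed length cannot
be stored correctly. A minimal standalone sketch with made-up
values, not MyISAM code:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
    /* Made-up header length for a table with very many columns/keys. */
    uint32_t info_length= 70000;

    /* What a 2-byte length field would keep: the value wraps modulo 65536. */
    uint16_t stored= (uint16_t) info_length;

    printf("computed: %u bytes, stored in 16 bits: %u bytes\n",
           (unsigned) info_length, (unsigned) stored);

    /* The new check refuses to create such a table instead of truncating. */
    if (info_length > 65535)
      printf("refuse creation: table description header exceeds 64KB\n");
    return 0;
  }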

No test case. The required select statement is way too
large for the test suite. I added a test script to the
bug report.


myisam/mi_create.c:
  Bug#11824 - internal /tmp/*.{MYD,MYI} files remain, causing subsequent queries to fail
  Added a check to mi_create() that the table description
  header of the index file does not exceed 64KB. The header
  has only 16 bits to encode its length.
myisam/mi_delete_table.c:
  Bug#11824 - internal /tmp/*.{MYD,MYI} files remain, causing subsequent queries to fail
  Interpret an error in the table open as the table not having
  a raid configuration. Thus try to delete the normal data
  file, but leave behind raid chunks if they exist.
parent 89e41595
myisam/mi_create.c
@@ -59,6 +59,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
   my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE];
   MI_CREATE_INFO tmp_create_info;
   DBUG_ENTER("mi_create");
+  DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u",
+                       keys, columns, uniques, flags));
   if (!ci)
   {
@@ -447,6 +449,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
                                uniques * MI_UNIQUEDEF_SIZE +
                                (key_segs + unique_key_parts)*HA_KEYSEG_SIZE+
                                columns*MI_COLUMNDEF_SIZE);
+  DBUG_PRINT("info", ("info_length: %u", info_length));
+  /* There are only 16 bits for the total header length. */
+  if (info_length > 65535)
+  {
+    my_printf_error(0, "MyISAM table '%s' has too many columns and/or "
+                    "indexes and/or unique constraints.",
+                    MYF(0), name + dirname_length(name));
+    my_errno= HA_WRONG_CREATE_OPTION;
+    goto err;
+  }
   bmove(share.state.header.file_version,(byte*) myisam_file_magic,4);
   ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ?
@@ -594,6 +606,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
     errpos=3;
   }
+  DBUG_PRINT("info", ("write state info and base info"));
   if (mi_state_info_write(file, &share.state, 2) ||
       mi_base_info_write(file, &share.base))
     goto err;
@@ -607,6 +620,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
 #endif
   /* Write key and keyseg definitions */
+  DBUG_PRINT("info", ("write key and keyseg definitions"));
   for (i=0 ; i < share.base.keys - uniques; i++)
   {
     uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ? 2*SPDIMS : 0;
@@ -655,6 +669,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
   }
   /* Save unique definition */
+  DBUG_PRINT("info", ("write unique definitions"));
   for (i=0 ; i < share.state.header.uniques ; i++)
   {
     if (mi_uniquedef_write(file, &uniquedefs[i]))
@@ -665,6 +680,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
       goto err;
     }
   }
+  DBUG_PRINT("info", ("write field definitions"));
   for (i=0 ; i < share.base.fields ; i++)
     if (mi_recinfo_write(file, &recinfo[i]))
       goto err;
@@ -679,6 +695,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
 #endif
   /* Enlarge files */
+  DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart));
   if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0)))
     goto err;
myisam/mi_delete_table.c
@@ -34,13 +34,25 @@ int mi_delete_table(const char *name)
 #ifdef USE_RAID
   {
     MI_INFO *info;
-    /* we use 'open_for_repair' to be able to delete a crashed table */
-    if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
-      DBUG_RETURN(my_errno);
-    raid_type = info->s->base.raid_type;
-    raid_chunks = info->s->base.raid_chunks;
-    mi_close(info);
+    /*
+      When built with RAID support, we need to determine if this table
+      makes use of the raid feature. If yes, we need to remove all raid
+      chunks. This is done with my_raid_delete(). Unfortunately it is
+      necessary to open the table just to check this. We use
+      'open_for_repair' to be able to open even a crashed table. If even
+      this open fails, we assume no raid configuration for this table
+      and try to remove the normal data file only. This may however
+      leave the raid chunks behind.
+    */
+    if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
+      raid_type= 0;
+    else
+    {
+      raid_type= info->s->base.raid_type;
+      raid_chunks= info->s->base.raid_chunks;
+      mi_close(info);
+    }
   }
 #ifdef EXTRA_DEBUG
   check_table_is_closed(name,"delete");
 #endif