Commit bef95a4b authored by Michael Widenius

-Run test suite with smaller aria keybuffer size (to make it possible to run more tests in parallel)
-Added a test and extra code to ensure we don't leave keyread on for a handler table.
-Always create on-disk temporary files with long data pointers if SQL_SMALL_RESULT is not used. This ensures that we can handle temporary files bigger than 4G (see the sketch below).
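
As a reference for the last point, a condensed sketch of the new rule for on-disk temporary tables (the real change sits inside create_internal_tmp_table() in sql/sql_select.cc; the helper name below is illustrative only and assumes the server's internal headers and types):

  /*
    Unless the caller asked for SQL_SMALL_RESULT, request an "unlimited"
    data file length so the engine creates the table with long data
    pointers and the temporary file can grow past 4G.
  */
  static void set_tmp_data_file_length(MARIA_CREATE_INFO *create_info,
                                       ulonglong options)
  {
    bzero((char*) create_info, sizeof(*create_info));
    if (!(options & SELECT_SMALL_RESULT))
      create_info->data_file_length= ~(ulonglong) 0;  /* force long pointers */
  }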

mysql-test/include/default_mysqld.cnf:
  Run test suite with smaller aria keybuffer size
mysql-test/suite/maria/maria3.result:
  Run test suite with smaller aria keybuffer size
mysql-test/suite/sys_vars/r/aria_pagecache_buffer_size_basic.result:
  Run test suite with smaller aria keybuffer size
sql/handler.cc:
  Disable key read (extra safety in case something went wrong)
sql/multi_range_read.cc:
  Ensure we don't leave keyread on for secondary_file
sql/opt_range.cc:
  Simplify code with mark_columns_used_by_index_no_reset()
  Ensure that read_keys_and_merge() disables keyread only if it enabled it (see the sketch after this file list)
sql/opt_subselect.cc:
  Remove no longer used argument from create_internal_tmp_table()
sql/sql_derived.cc:
  Remove no longer used argument from create_internal_tmp_table()
sql/sql_select.cc:
  Use 'enable_keyread()' instead of calling extra(HA_EXTRA_KEYREAD) directly. (Makes debugging easier)
  Always create on-disk temporary files with long data pointers if SQL_SMALL_RESULT is not used. This ensures that we can handle temporary files bigger than 4G.
  Remove no longer used argument from create_internal_tmp_table()
  More DBUG instrumentation
sql/sql_select.h:
  Remove no longer used argument from create_internal_tmp_table()
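
For the keyread change referenced above, a minimal sketch of the guard pattern used (condensed from the read_keys_and_merge_scans() hunk below; error handling omitted). Keyread is turned off at the end only if this code was the one that turned it on:

  bool enabled_keyread= 0;
  if (!head->key_read)                  /* not already in index-only mode */
  {
    enabled_keyread= 1;
    head->enable_keyread();             /* read rowids from the index only */
  }
  head->prepare_for_position();
  /* ... scan the merged index scans and collect rowids ... */
  if (enabled_keyread)
    head->disable_keyread();            /* restore the caller's state */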
parent 33ef9937
......@@ -35,6 +35,7 @@ log-bin-trust-function-creators=1
key_buffer_size= 1M
sort_buffer= 256K
max_heap_table_size= 1M
loose-aria-pagecache-buffer-size=8M
loose-feedback-user-info= mysql-test
......
......@@ -312,7 +312,7 @@ aria_log_file_size 4294959104
aria_log_purge_type immediate
aria_max_sort_file_size 9223372036853727232
aria_pagecache_age_threshold 300
aria_pagecache_buffer_size 134217728
aria_pagecache_buffer_size 8388608
aria_pagecache_division_limit 100
aria_page_checksum OFF
aria_recover NORMAL
......
select @@global.aria_pagecache_buffer_size;
@@global.aria_pagecache_buffer_size
134217728
8388608
select @@session.aria_pagecache_buffer_size;
ERROR HY000: Variable 'aria_pagecache_buffer_size' is a GLOBAL variable
show global variables like 'aria_pagecache_buffer_size';
Variable_name Value
aria_pagecache_buffer_size 134217728
aria_pagecache_buffer_size 8388608
show session variables like 'aria_pagecache_buffer_size';
Variable_name Value
aria_pagecache_buffer_size 134217728
aria_pagecache_buffer_size 8388608
select * from information_schema.global_variables where variable_name='aria_pagecache_buffer_size';
VARIABLE_NAME VARIABLE_VALUE
ARIA_PAGECACHE_BUFFER_SIZE 134217728
ARIA_PAGECACHE_BUFFER_SIZE 8388608
select * from information_schema.session_variables where variable_name='aria_pagecache_buffer_size';
VARIABLE_NAME VARIABLE_VALUE
ARIA_PAGECACHE_BUFFER_SIZE 134217728
ARIA_PAGECACHE_BUFFER_SIZE 8388608
set global aria_pagecache_buffer_size=1;
ERROR HY000: Variable 'aria_pagecache_buffer_size' is a read only variable
set session aria_pagecache_buffer_size=1;
......
......@@ -2757,6 +2757,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
{
/* This should never happen, assert in debug, and fail in release build */
DBUG_ASSERT(0);
(void) extra(HA_EXTRA_NO_KEYREAD);
*first_value= ULONGLONG_MAX;
return;
}
......
......@@ -1114,6 +1114,7 @@ void DsMrr_impl::close_second_handler()
{
if (secondary_file)
{
secondary_file->extra(HA_EXTRA_NO_KEYREAD);
secondary_file->ha_index_or_rnd_end();
secondary_file->ha_external_lock(current_thd, F_UNLCK);
secondary_file->ha_close();
......
......@@ -2069,30 +2069,16 @@ end:
org_key_read= head->key_read;
head->file= file;
head->key_read= 0;
head->mark_columns_used_by_index_no_reset(index, head->read_set);
if (!head->no_keyread)
{
doing_key_read= 1;
head->mark_columns_used_by_index_no_reset(index, head->read_set);
head->enable_keyread();
}
head->prepare_for_position();
if (head->no_keyread)
{
/*
We can get here when doing multi-table delete and having index_merge
condition on a table that we're deleting from. It probably doesn't make
sense to use index_merge, but de-facto it is used.
When it is used, we need to index columns to be read (before maria-5.3,
read_multi_range_first() would set it).
We shouldn't call mark_columns_used_by_index(), because it calls
enable_keyread(), which is not allowed.
*/
head->mark_columns_used_by_index_no_reset(index, head->read_set);
}
head->file= org_file;
head->key_read= org_key_read;
......@@ -10598,12 +10584,13 @@ int read_keys_and_merge_scans(THD *thd,
Unique *unique= *unique_ptr;
handler *file= head->file;
bool with_cpk_filter= pk_quick_select != NULL;
bool enabled_keyread= 0;
DBUG_ENTER("read_keys_and_merge");
/* We're going to just read rowids. */
if (!head->key_read)
{
enabled_keyread= 1;
head->enable_keyread();
}
head->prepare_for_position();
......@@ -10697,12 +10684,14 @@ int read_keys_and_merge_scans(THD *thd,
/*
index merge currently doesn't support "using index" at all
*/
if (enabled_keyread)
head->disable_keyread();
if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
err:
if (enabled_keyread)
head->disable_keyread();
DBUG_RETURN(1);
}
......
......@@ -4107,7 +4107,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
recinfo++;
if (share->db_type() == TMP_ENGINE_HTON)
{
if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0, 0))
if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0))
goto err;
}
if (open_tmp_table(table))
......
......@@ -812,8 +812,7 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
result->tmp_table_param.start_recinfo,
&result->tmp_table_param.recinfo,
(unit->first_select()->options |
thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS),
thd->variables.big_tables))
thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS)))
return(TRUE);
}
if (open_tmp_table(table))
......
......@@ -10028,10 +10028,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
join_read_system :join_read_const;
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
{
table->key_read=1;
table->file->extra(HA_EXTRA_KEYREAD);
}
table->enable_keyread();
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
......@@ -10040,10 +10037,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
/* fall through */
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
{
table->key_read=1;
table->file->extra(HA_EXTRA_KEYREAD);
}
table->enable_keyread();
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
......@@ -10655,8 +10649,10 @@ void JOIN::cleanup(bool full)
{
if (tab->table)
{
DBUG_PRINT("info", ("close index: %s.%s", tab->table->s->db.str,
tab->table->s->table_name.str));
DBUG_PRINT("info", ("close index: %s.%s alias: %s",
tab->table->s->db.str,
tab->table->s->table_name.str,
tab->table->alias.c_ptr()));
tab->table->file->ha_index_or_rnd_end();
}
}
......@@ -15102,8 +15098,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (share->db_type() == TMP_ENGINE_HTON)
{
if (create_internal_tmp_table(table, param->keyinfo, param->start_recinfo,
&param->recinfo, select_options,
thd->variables.big_tables))
&param->recinfo, select_options))
goto err;
}
if (open_tmp_table(table))
......@@ -15322,7 +15317,7 @@ bool open_tmp_table(TABLE *table)
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
ENGINE_COLUMNDEF *start_recinfo,
ENGINE_COLUMNDEF **recinfo,
ulonglong options, my_bool big_tables)
ulonglong options)
{
int error;
MARIA_KEYDEF keydef;
......@@ -15415,7 +15410,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
bzero((char*) &create_info,sizeof(create_info));
if (big_tables && !(options & SELECT_SMALL_RESULT))
/* Use long data format, to ensure we never get a 'table is full' error */
if (!(options & SELECT_SMALL_RESULT))
create_info.data_file_length= ~(ulonglong) 0;
/*
......@@ -15505,7 +15501,7 @@ bool create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
ENGINE_COLUMNDEF *start_recinfo,
ENGINE_COLUMNDEF **recinfo,
ulonglong options, my_bool big_tables)
ulonglong options)
{
int error;
MI_KEYDEF keydef;
......@@ -15592,7 +15588,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
MI_CREATE_INFO create_info;
bzero((char*) &create_info,sizeof(create_info));
if (big_tables && !(options & SELECT_SMALL_RESULT))
if (!(options & SELECT_SMALL_RESULT))
create_info.data_file_length= ~(ulonglong) 0;
if ((error=mi_create(share->table_name.str, share->keys, &keydef,
......@@ -15682,8 +15678,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
if (create_internal_tmp_table(&new_table, table->key_info, start_recinfo,
recinfo,
thd->lex->select_lex.options |
thd->variables.option_bits,
thd->variables.big_tables))
thd->variables.option_bits))
goto err2;
if (open_tmp_table(&new_table))
goto err1;
......@@ -17209,6 +17204,8 @@ join_read_first(JOIN_TAB *tab)
{
int error= 0;
TABLE *table=tab->table;
DBUG_ENTER("join_read_first");
if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
!table->key_read)
table->enable_keyread();
......@@ -17225,9 +17222,9 @@ join_read_first(JOIN_TAB *tab)
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
report_error(table, error);
return -1;
DBUG_RETURN(-1);
}
return 0;
DBUG_RETURN(0);
}
......@@ -17247,6 +17244,8 @@ join_read_last(JOIN_TAB *tab)
{
TABLE *table=tab->table;
int error= 0;
DBUG_ENTER("join_read_first");
if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
!table->key_read)
table->enable_keyread();
......@@ -17260,9 +17259,9 @@ join_read_last(JOIN_TAB *tab)
if (!error)
error= table->file->prepare_index_scan();
if (error || (error= tab->table->file->ha_index_last(tab->table->record[0])))
return report_error(table, error);
DBUG_RETURN(report_error(table, error));
return 0;
DBUG_RETURN(0);
}
......
......@@ -1817,7 +1817,7 @@ bool create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
ENGINE_COLUMNDEF *start_recinfo,
ENGINE_COLUMNDEF **recinfo,
ulonglong options, my_bool big_tables);
ulonglong options);
bool open_tmp_table(TABLE *table);
void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
double prev_record_reads(POSITION *positions, uint idx, table_map found_ref);
......
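For illustration, a call site with the new prototype looks like the create_tmp_table() hunk above (condensed; surrounding error handling omitted):

  if (share->db_type() == TMP_ENGINE_HTON)
  {
    /* big_tables argument is gone; only the option bits are passed */
    if (create_internal_tmp_table(table, param->keyinfo, param->start_recinfo,
                                  &param->recinfo, select_options))
      goto err;
  }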