Commit 6a503b67 authored by unknown

Merge moonlight.intranet:/home/tomash/src/mysql_ab/mysql-5.0
into  moonlight.intranet:/home/tomash/src/mysql_ab/mysql-5.0-bug21915


sql/mysql_priv.h:
  Auto merged
parents 26770e3a b372ebc5
sql/mysql_priv.h
@@ -131,6 +131,8 @@ MY_LOCALE *my_locale_by_name(const char *name);
 #define MAX_ACCEPT_RETRY	10	// Test accept this many times
 #define MAX_FIELDS_BEFORE_HASH	32
 #define USER_VARS_HASH_SIZE	16
+#define TABLE_OPEN_CACHE_MIN	64
+#define TABLE_OPEN_CACHE_DEFAULT	64
 /*
   Value of 9236 discovered through binary search 2006-09-26 on Ubuntu Dapper
sql/mysqld.cc
@@ -2641,19 +2641,43 @@ static int init_common_variables(const char *conf_file_name, int argc,
   /* connections and databases needs lots of files */
   {
-    uint files, wanted_files;
+    uint files, wanted_files, max_open_files;
 
-    wanted_files= 10+(uint) max(max_connections*5,
-                                max_connections+table_cache_size*2);
-    set_if_bigger(wanted_files, open_files_limit);
-    files= my_set_max_open_files(wanted_files);
+    /* MyISAM requires two file handles per table. */
+    wanted_files= 10+max_connections+table_cache_size*2;
+    /*
+      We are trying to allocate no fewer than max_connections*5 file
+      handles (i.e. we are trying to set the limit so that they will
+      be available). In addition, we allocate no fewer than were
+      already allocated. However, below we report a warning and
+      recompute values only if we got fewer file handles than were
+      explicitly requested. No warning and no re-computation occur if
+      we can't get max_connections*5 but still got no fewer than were
+      requested (the value of wanted_files).
+    */
+    max_open_files= max(max(wanted_files, max_connections*5),
+                        open_files_limit);
+    files= my_set_max_open_files(max_open_files);
 
     if (files < wanted_files)
     {
       if (!open_files_limit)
       {
-        max_connections= (ulong) min((files-10),max_connections);
-        table_cache_size= (ulong) max((files-10-max_connections)/2,64);
+        /*
+          If we have requested too many file handles, bring
+          max_connections within supported bounds.
+        */
+        max_connections= (ulong) min(files-10-TABLE_OPEN_CACHE_MIN*2,
+                                     max_connections);
+        /*
+          Decrease table_cache_size according to max_connections, but
+          not below TABLE_OPEN_CACHE_MIN. The outer min() ensures that
+          we never increase table_cache_size automatically (that could
+          happen if max_connections were decreased above).
+        */
+        table_cache_size= (ulong) min(max((files-10-max_connections)/2,
+                                          TABLE_OPEN_CACHE_MIN),
+                                      table_cache_size);
         DBUG_PRINT("warning",
                    ("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
                     files, max_connections, table_cache_size));
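
For illustration, here is a standalone C sketch of the recomputation the hunk above performs when the OS grants fewer file handles than wanted. This is not MySQL source: min_ul()/max_ul() are stand-ins for the server's min()/max() macros, and the input numbers are hypothetical.

#include <stdio.h>

#define TABLE_OPEN_CACHE_MIN 64

/* Stand-ins for the server's min()/max() macros. */
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
  /* Hypothetical configuration and OS grant. */
  unsigned long max_connections= 500, table_cache_size= 512;
  unsigned long wanted_files= 10 + max_connections + table_cache_size*2; /* 1534 */
  unsigned long files= 400;  /* pretend my_set_max_open_files() returned this */

  if (files < wanted_files)
  {
    /* Cap max_connections, reserving handles for the minimal table cache. */
    max_connections= min_ul(files - 10 - TABLE_OPEN_CACHE_MIN*2, max_connections);
    /* Shrink the table cache to fit, but never below the minimum and
       never above its configured value. */
    table_cache_size= min_ul(max_ul((files - 10 - max_connections)/2,
                                    TABLE_OPEN_CACHE_MIN),
                             table_cache_size);
  }
  printf("max_connections=%lu table_cache_size=%lu\n",
         max_connections, table_cache_size);  /* prints 262 and 64 */
  return 0;
}

With files=400, max_connections is capped at 400-10-64*2 = 262, and the table cache settles at its floor of 64; the 10+262+128 = 400 handles then fit exactly within the grant.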
@@ -5943,8 +5967,8 @@ The minimum value for this variable is 4096.",
    0, 0, 0, 0},
   {"table_cache", OPT_TABLE_CACHE,
    "The number of open tables for all threads.", (gptr*) &table_cache_size,
-   (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
-   0, 1, 0},
+   (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG,
+   TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
   {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
    "seconds to wait for a table level lock before returning an error. Used"
    " only if the connection has active cursors.",
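
A minimal standalone sketch of what this last hunk changes: the numbers following REQUIRED_ARG in the option entry encode the option's default, minimum, and maximum, so the edit swaps the hard-coded default 64 for TABLE_OPEN_CACHE_DEFAULT while keeping min=1 and max=512*1024. The struct below is a hypothetical simplification, not the server's my_option definition.

#include <stdio.h>

#define TABLE_OPEN_CACHE_DEFAULT 64

/* Hypothetical simplified descriptor; the field order mirrors the
   default/min/max trio in the entry above. */
struct opt_sketch
{
  const char *name;
  long long def_value;   /* previously the literal 64 */
  long long min_value;
  long long max_value;
};

int main(void)
{
  struct opt_sketch table_cache=
    { "table_cache", TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L };
  printf("%s: default=%lld min=%lld max=%lld\n",
         table_cache.name, table_cache.def_value,
         table_cache.min_value, table_cache.max_value);
  return 0;
}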