Commit 7f681996 authored by monty@work.mysql.com

merge

parents 6a8fce39 11e56722
......@@ -16,7 +16,7 @@
/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
#define CHECK_VERSION "1.01"
#define CHECK_VERSION "1.02"
#include <global.h>
#include <my_sys.h>
......@@ -503,25 +503,24 @@ static int use_db(char *database)
static int handle_request_for_tables(char *tables, uint length)
{
char *query, *end, options[100];
char *query, *end, options[100], message[100];
const char *op = 0;
options[0] = 0;
end = options;
switch (what_to_do) {
case DO_CHECK:
op = "CHECK";
end = options;
if (opt_quick) end = strmov(end, "QUICK");
if (opt_fast) end = strmov(end, "FAST");
if (opt_medium_check) end = strmov(end, "MEDIUM"); /* Default */
if (opt_extended) end = strmov(end, "EXTENDED");
if (opt_check_only_changed) end = strmov(end, "CHANGED");
if (opt_quick) end = strmov(end, " QUICK");
if (opt_fast) end = strmov(end, " FAST");
if (opt_medium_check) end = strmov(end, " MEDIUM"); /* Default */
if (opt_extended) end = strmov(end, " EXTENDED");
if (opt_check_only_changed) end = strmov(end, " CHANGED");
break;
case DO_REPAIR:
op = "REPAIR";
end = options;
if (opt_quick) end = strmov(end, "QUICK");
if (opt_extended) end = strmov(end, "EXTENDED");
if (opt_quick) end = strmov(end, " QUICK");
if (opt_extended) end = strmov(end, " EXTENDED");
break;
case DO_ANALYZE:
op = "ANALYZE";
......@@ -533,11 +532,11 @@ static int handle_request_for_tables(char *tables, uint length)
if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME))))
return 1;
sprintf(query, "%s TABLE %s %s", op, options, tables);
sprintf(query, "%s TABLE %s %s", op, tables, options);
if (mysql_query(sock, query))
{
sprintf(options, "when executing '%s TABLE'", op);
DBerror(sock, options);
sprintf(message, "when executing '%s TABLE ... %s", op, options);
DBerror(sock, message);
return 1;
}
print_result();
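For reference, with the leading spaces on the option keywords and the reordered sprintf, the client now sends statements of the form below, matching the server's CHECK TABLE / REPAIR TABLE syntax; the table names are only illustrative:
CHECK TABLE t1,t2 QUICK FAST;
REPAIR TABLE t1 QUICK EXTENDED;
-- the old format placed the concatenated options ("QUICKFAST") between TABLE and the table list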
......@@ -551,23 +550,34 @@ static void print_result()
MYSQL_RES *res;
MYSQL_ROW row;
char prev[NAME_LEN*2+2];
int i;
uint i;
my_bool found_error=0;
res = mysql_use_result(sock);
prev[0] = '\0';
for (i = 0; (row = mysql_fetch_row(res)); i++)
{
int changed = strcmp(prev, row[0]);
int status = !strcmp(row[2], "status");
if (opt_silent && status)
continue;
my_bool status = !strcmp(row[2], "status");
if (status)
{
if (found_error)
{
if (what_to_do != DO_REPAIR && opt_auto_repair &&
(!opt_fast || strcmp(row[3],"OK")))
insert_dynamic(&tables4repair, row[0]);
}
found_error=0;
if (opt_silent)
continue;
}
if (status && changed)
printf("%-50s %s", row[0], row[3]);
else if (!status && changed)
{
printf("%s\n%-9s: %s", row[0], row[2], row[3]);
if (what_to_do != DO_REPAIR && opt_auto_repair)
insert_dynamic(&tables4repair, row[0]);
found_error=1;
}
else
printf("%-9s: %s", row[2], row[3]);
......
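A note on the rows parsed in print_result() above, given only as an illustration: the result set of a CHECK/REPAIR/ANALYZE TABLE statement has the columns Table, Op, Msg_type and Msg_text, so row[0] is the table name, row[2] the message type ("status", "error", "warning") and row[3] the message text. A typical result, with a hypothetical table name:
CHECK TABLE test.t1 FAST;
-- Table    Op     Msg_type  Msg_text
-- test.t1  check  status    OK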
......@@ -24,6 +24,7 @@ Created 9/17/2000 Heikki Tuuri
#include "trx0roll.h"
#include "trx0purge.h"
#include "lock0lock.h"
#include "rem0cmp.h"
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
......
......@@ -509,3 +509,5 @@ id id3
1 1
2 2
100 2
KINMU_DATE
KINMU_DATE
......@@ -5,5 +5,11 @@
+9999999999999999999 -9999999999999999999
10000000000000000000 -10000000000000000000
a
18446744073709551614
18446744073709551615
a
18446744073709551615
a
18446744073709551615
a
18446744073709551614
......@@ -7,3 +7,5 @@ isbn city libname a
isbn city libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
f1 count(distinct t2.f2) count(distinct 1,NULL)
1 0 0
......@@ -80,3 +80,13 @@ t1 CREATE TABLE `t1` (
`test_set` set('val1','val2','val3') NOT NULL default '',
`name` char(20) default 'O''Brien'
) TYPE=MyISAM COMMENT='it''s a table'
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL default '0',
UNIQUE KEY `aa` (`a`)
) TYPE=MyISAM
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL default '0',
PRIMARY KEY (`a`)
) TYPE=MyISAM
......@@ -705,3 +705,13 @@ commit;
select id,id3 from t1;
UNLOCK TABLES;
DROP TABLE t1;
#
# Test with empty tables (crashed with lock error)
#
CREATE TABLE t1 (SYAIN_NO char(5) NOT NULL default '', KINMU_DATE char(6) NOT NULL default '', PRIMARY KEY (SYAIN_NO,KINMU_DATE)) TYPE=BerkeleyDB;
CREATE TABLE t2 ( SYAIN_NO char(5) NOT NULL default '',STR_DATE char(8) NOT NULL default '',PRIMARY KEY (SYAIN_NO,STR_DATE) ) TYPE=BerkeleyDB;
select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
DROP TABLE t1,t2;
......@@ -6,7 +6,11 @@ select 9223372036854775807,-009223372036854775808;
select +9999999999999999999,-9999999999999999999;
drop table if exists t1;
create table t1 (a bigint unsigned);
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFF);
create table t1 (a bigint unsigned not null, primary key(a));
insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE);
select * from t1;
select * from t1 where a=18446744073709551615;
select * from t1 where a='18446744073709551615';
delete from t1 where a=18446744073709551615;
select * from t1;
drop table t1;
......@@ -32,3 +32,13 @@ insert into t1 values ('NYC Lib','New York');
select t2.isbn,city,t1.libname,count(t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city,t1.libname;
select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1;
drop table t1, t2, t3;
#
# Problem with LEFT JOIN
#
create table t1 (f1 int);
insert into t1 values (1);
create table t2 (f1 int,f2 int);
select t1.f1,count(distinct t2.f2),count(distinct 1,NULL) from t1 left join t2 on t1.f1=t2.f1 group by t1.f1;
drop table t1,t2;
......@@ -65,3 +65,10 @@ create table t1 (
) comment = 'it\'s a table' ;
show create table t1 ;
drop table t1;
create table t1 (a int not null, unique aa (a));
show create table t1;
drop table t1;
create table t1 (a int not null, primary key (a));
show create table t1;
drop table t1;
......@@ -11,24 +11,26 @@
# Another time vacuum() filled our system disk, which had 6G free,
# while vacuuming a table of 60M.
#
# We have sent a mail about this to the PostgreSQL mailing list, so
# the PostgreSQL developers should be aware of these problems and should
# hopefully fix this soon.
#
# WARNING
# The test was run on an Intel Xeon 2x 550 Mhz machine with 1G memory,
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.0 compiled with SMP
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.2 compiled with SMP
# support
# Both the perl client and the database server are run
# on the same machine. No other cpu intensive process was used during
# the benchmark.
#
# During the test we run PostgreSQL with -o -F, not async mode (not ACID safe)
# because when we started postmaster without -o -F, PostgreSQL log files
# filled up a 9G disk until postmaster crashed.
# We did however notice that with -o -F, PostgreSQL was a magnitude slower
# than when not using -o -F.
# First, install postgresql-7.1.1.tar.gz
#
# First, install postgresql-7.1.2.tar.gz
# Adding the following lines to your ~/.bash_profile or
# corresponding file. If you are using csh, use ´setenv´.
#
export POSTGRES_INCLUDE=/usr/local/pg/include
export POSTGRES_LIB=/usr/local/pg/lib
......@@ -62,7 +64,7 @@ su - postgres
exit
#
# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.18.tar.gz,
# available from http://www.perl.com/CPAN/
export POSTGRES_LIB=/usr/local/pg/lib/
......@@ -82,6 +84,7 @@ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --u
# When running with --fast we run the following vacuum commands on
# the database between each major update of the tables:
# vacuum anlyze table
# vacuum table
# or
# vacuum analyze
......
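For reference, the vacuum statements listed above as one would issue them from psql; the table name is only a placeholder:
VACUUM ANALYZE bench1;
VACUUM bench1;
-- or, for the whole database:
VACUUM ANALYZE;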
Testing server 'MySQL 3.23.39' at 2001-06-05 19:26:17
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.45 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.89 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.87 CPU)
Time for select_join (100): 3 wallclock secs ( 0.51 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.71 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.08 usr 2.01 sys + 0.00 cusr 0.00 csys = 6.09 CPU)
Time for select_distinct (800): 15 wallclock secs ( 1.75 usr 0.69 sys + 0.00 cusr 0.00 csys = 2.44 CPU)
Time for select_group (2600): 20 wallclock secs ( 1.57 usr 0.41 sys + 0.00 cusr 0.00 csys = 1.98 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Benchmark DBD suite: 2.12
Date of test: 2001-06-05 19:27:31
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.39
ATIS: Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
alter-table: Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
big-tables: Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
connect: Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
create: Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
insert: Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
select: Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
wisconsin: Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 261.00 0.13 0.02 0.15 992
connect 16.00 6.84 2.50 9.34 10000
connect+select_1_row 15.00 7.11 3.70 10.81 10000
connect+select_simple 13.00 6.70 3.21 9.91 10000
count 45.00 0.01 0.00 0.01 100
count_distinct 60.00 0.42 0.08 0.50 1000
count_distinct_2 63.00 0.18 0.03 0.21 1000
count_distinct_big 165.00 7.78 3.16 10.94 120
count_distinct_group 194.00 1.21 0.37 1.58 1000
count_distinct_group_on_key 59.00 0.51 0.07 0.58 1000
count_distinct_group_on_key_parts 194.00 1.12 0.46 1.58 1000
count_distinct_key_prefix 51.00 0.45 0.08 0.53 1000
count_group_on_key_parts 58.00 1.16 0.35 1.51 1000
count_on_key 586.00 16.61 2.71 19.32 50100
create+drop 33.00 2.94 0.82 3.76 10000
create_MANY_tables 18.00 1.02 0.62 1.64 5000
create_index 5.00 0.00 0.00 0.00 8
create_key+drop 41.00 3.05 0.66 3.71 10000
create_table 0.00 0.01 0.00 0.01 31
delete_all 17.00 0.00 0.00 0.00 12
delete_all_many_keys 75.00 0.03 0.00 0.03 1
delete_big 1.00 0.00 0.00 0.00 1
delete_big_many_keys 75.00 0.03 0.00 0.03 128
delete_key 4.00 0.76 0.29 1.05 10000
drop_index 5.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 6.00 0.37 0.63 1.00 5000
insert 144.00 24.06 14.28 38.34 350768
insert_duplicates 31.00 5.06 3.72 8.78 100000
insert_key 137.00 9.91 6.26 16.17 100000
insert_many_fields 10.00 0.54 0.08 0.62 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 9.00 0.00 0.00 0.00 1
min_max 30.00 0.04 0.01 0.05 60
min_max_on_key 230.00 28.28 4.43 32.71 85000
order_by_big 78.00 22.39 9.83 32.22 10
order_by_big_key 33.00 23.35 10.15 33.50 10
order_by_big_key2 32.00 22.53 9.81 32.34 10
order_by_big_key_desc 36.00 23.47 10.27 33.74 10
order_by_big_key_diff 74.00 22.66 9.76 32.42 10
order_by_big_key_prefix 33.00 22.18 9.81 31.99 10
order_by_key2_diff 9.00 1.30 0.85 2.15 500
order_by_key_prefix 4.00 0.97 0.57 1.54 500
order_by_range 8.00 1.26 0.49 1.75 500
outer_join 110.00 0.00 0.00 0.00 10
outer_join_found 107.00 0.00 0.00 0.00 10
outer_join_not_found 59.00 0.00 0.00 0.00 500
outer_join_on_key 60.00 0.00 0.00 0.00 10
select_1_row 3.00 0.81 0.69 1.50 10000
select_2_rows 3.00 0.67 0.63 1.30 10000
select_big 63.00 32.72 16.55 49.27 10080
select_column+column 4.00 0.52 0.46 0.98 10000
select_diff_key 193.00 0.32 0.04 0.36 500
select_distinct 15.00 1.75 0.69 2.44 800
select_group 75.00 1.59 0.45 2.04 2711
select_group_when_MANY_tables 5.00 0.43 0.87 1.30 5000
select_join 3.00 0.51 0.20 0.71 100
select_key 132.00 53.98 10.53 64.51 200000
select_key2 139.00 78.61 11.08 89.69 200000
select_key2_return_key 131.00 64.58 9.61 74.19 200000
select_key2_return_prim 134.00 72.33 11.34 83.67 200000
select_key_prefix 141.00 86.32 12.05 98.37 200000
select_key_prefix_join 13.00 4.08 2.01 6.09 100
select_key_return_key 125.00 59.92 12.00 71.92 200000
select_many_fields 23.00 8.85 7.55 16.40 2000
select_query_cache 120.00 3.67 0.53 4.20 10000
select_query_cache2 120.00 3.80 0.57 4.37 10000
select_range 201.00 9.05 3.95 13.00 410
select_range_key2 21.00 7.15 1.40 8.55 25010
select_range_prefix 22.00 6.55 1.40 7.95 25010
select_simple 2.00 0.54 0.49 1.03 10000
select_simple_join 3.00 0.68 0.19 0.87 500
update_big 64.00 0.00 0.00 0.00 10
update_of_key 25.00 2.62 1.44 4.06 50000
update_of_key_big 35.00 0.05 0.04 0.09 501
update_of_primary_key_many_keys 47.00 0.01 0.02 0.03 256
update_with_key 119.00 18.44 12.64 31.08 300000
update_with_key_prefix 36.00 6.23 3.85 10.08 100000
wisc_benchmark 5.00 2.33 0.52 2.85 114
TOTALS 5323.00 795.55 233.87 1029.42 2551551
Testing server 'MySQL 3.23.39' at 2001-06-05 13:47:22
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for alter_table_add (992): 261 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:51:53
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 10 wallclock secs ( 4.43 usr 4.17 sys + 0.00 cusr 0.00 csys = 8.60 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 4.42 usr 3.38 sys + 0.00 cusr 0.00 csys = 7.80 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.46 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 7 wallclock secs ( 0.08 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:52:26
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 16 wallclock secs ( 6.84 usr 2.50 sys + 0.00 cusr 0.00 csys = 9.34 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 6.70 usr 3.21 sys + 0.00 cusr 0.00 csys = 9.91 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.54 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.03 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 7.11 usr 3.70 sys + 0.00 cusr 0.00 csys = 10.81 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.81 usr 0.69 sys + 0.00 cusr 0.00 csys = 1.50 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.67 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Test select with aritmetic (+)
Time for select_column+column (10000): 4 wallclock secs ( 0.52 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.98 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 30 wallclock secs (10.79 usr 6.41 sys + 0.00 cusr 0.00 csys = 17.20 CPU)
Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:53:52
Testing the speed of creating and droping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 18 wallclock secs ( 1.02 usr 0.62 sys + 0.00 cusr 0.00 csys = 1.64 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 5 wallclock secs ( 0.43 usr 0.87 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 6 wallclock secs ( 0.37 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.00 CPU)
Testing create+drop
Time for create+drop (10000): 33 wallclock secs ( 2.94 usr 0.82 sys + 0.00 cusr 0.00 csys = 3.76 CPU)
Time for create_key+drop (10000): 41 wallclock secs ( 3.05 usr 0.66 sys + 0.00 cusr 0.00 csys = 3.71 CPU)
Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:55:36
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 123 wallclock secs (21.22 usr 12.32 sys + 0.00 cusr 0.00 csys = 33.54 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 31 wallclock secs ( 5.06 usr 3.72 sys + 0.00 cusr 0.00 csys = 8.78 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 32 wallclock secs (21.78 usr 10.07 sys + 0.00 cusr 0.00 csys = 31.85 CPU)
Time for order_by_big_key (10:3000000): 33 wallclock secs (23.35 usr 10.15 sys + 0.00 cusr 0.00 csys = 33.50 CPU)
Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.47 usr 10.27 sys + 0.00 cusr 0.00 csys = 33.74 CPU)
Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (22.18 usr 9.81 sys + 0.00 cusr 0.00 csys = 31.99 CPU)
Time for order_by_big_key2 (10:3000000): 32 wallclock secs (22.53 usr 9.81 sys + 0.00 cusr 0.00 csys = 32.34 CPU)
Time for order_by_big_key_diff (10:3000000): 74 wallclock secs (22.66 usr 9.76 sys + 0.00 cusr 0.00 csys = 32.42 CPU)
Time for order_by_big (10:3000000): 78 wallclock secs (22.39 usr 9.83 sys + 0.00 cusr 0.00 csys = 32.22 CPU)
Time for order_by_range (500:125750): 8 wallclock secs ( 1.26 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.75 CPU)
Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 0.97 usr 0.57 sys + 0.00 cusr 0.00 csys = 1.54 CPU)
Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.30 usr 0.85 sys + 0.00 cusr 0.00 csys = 2.15 CPU)
Time for select_diff_key (500:1000): 193 wallclock secs ( 0.32 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.36 CPU)
Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.06 CPU)
Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.81 usr 0.68 sys + 0.00 cusr 0.00 csys = 3.49 CPU)
Time for select_key_prefix (200000): 141 wallclock secs (86.32 usr 12.05 sys + 0.00 cusr 0.00 csys = 98.37 CPU)
Time for select_key (200000): 132 wallclock secs (53.98 usr 10.53 sys + 0.00 cusr 0.00 csys = 64.51 CPU)
Time for select_key_return_key (200000): 125 wallclock secs (59.92 usr 12.00 sys + 0.00 cusr 0.00 csys = 71.92 CPU)
Time for select_key2 (200000): 139 wallclock secs (78.61 usr 11.08 sys + 0.00 cusr 0.00 csys = 89.69 CPU)
Time for select_key2_return_key (200000): 131 wallclock secs (64.58 usr 9.61 sys + 0.00 cusr 0.00 csys = 74.19 CPU)
Time for select_key2_return_prim (200000): 134 wallclock secs (72.33 usr 11.34 sys + 0.00 cusr 0.00 csys = 83.67 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 9 wallclock secs ( 4.00 usr 0.89 sys + 0.00 cusr 0.00 csys = 4.89 CPU)
Time for select_range_key2 (20000:43500): 9 wallclock secs ( 4.34 usr 0.72 sys + 0.00 cusr 0.00 csys = 5.06 CPU)
Time for select_group (111): 55 wallclock secs ( 0.02 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 5.12 usr 0.76 sys + 0.00 cusr 0.00 csys = 5.88 CPU)
Time for min_max (60): 30 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count_on_key (100): 52 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count (100): 45 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for count_distinct_big (20): 98 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 25 wallclock secs ( 2.62 usr 1.44 sys + 0.00 cusr 0.00 csys = 4.06 CPU)
Time for update_of_key_big (501): 35 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
Testing update with key
Time for update_with_key (300000): 119 wallclock secs (18.44 usr 12.64 sys + 0.00 cusr 0.00 csys = 31.08 CPU)
Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.23 usr 3.85 sys + 0.00 cusr 0.00 csys = 10.08 CPU)
Testing update of all rows
Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 107 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.76 usr 0.29 sys + 0.00 cusr 0.00 csys = 1.05 CPU)
Time for delete_all (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 137 wallclock secs ( 9.91 usr 6.26 sys + 0.00 cusr 0.00 csys = 16.17 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 47 wallclock secs ( 0.01 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 14:41:13
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.14 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 120 wallclock secs ( 3.67 usr 0.53 sys + 0.00 cusr 0.00 csys = 4.20 CPU)
Time for select_query_cache2 (10000): 120 wallclock secs ( 3.80 usr 0.57 sys + 0.00 cusr 0.00 csys = 4.37 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.15 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.22 CPU)
Time for select_range (410:1057904): 201 wallclock secs ( 9.05 usr 3.95 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Time for min_max_on_key (70000): 222 wallclock secs (23.16 usr 3.67 sys + 0.00 cusr 0.00 csys = 26.83 CPU)
Time for count_on_key (50000): 534 wallclock secs (16.58 usr 2.69 sys + 0.00 cusr 0.00 csys = 19.27 CPU)
Time for count_group_on_key_parts (1000:100000): 58 wallclock secs ( 1.16 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.51 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 51 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.53 CPU)
Time for count_distinct (1000:1000): 60 wallclock secs ( 0.42 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.50 CPU)
Time for count_distinct_2 (1000:1000): 63 wallclock secs ( 0.18 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.21 CPU)
Time for count_distinct_group_on_key (1000:6000): 59 wallclock secs ( 0.51 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.58 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 194 wallclock secs ( 1.12 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_group (1000:100000): 194 wallclock secs ( 1.21 usr 0.37 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_big (100:1000000): 67 wallclock secs ( 7.77 usr 3.16 sys + 0.00 cusr 0.00 csys = 10.93 CPU)
Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 15:13:43
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 13 wallclock secs ( 1.59 usr 1.18 sys + 0.00 cusr 0.00 csys = 2.77 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 5 wallclock secs ( 2.33 usr 0.52 sys + 0.00 cusr 0.00 csys = 2.85 CPU)
Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
......@@ -458,7 +458,7 @@ sub gd {
# set a color per server so in every result it has the same color ....
foreach $key (@key_order) {
if ($tot{$key}{'server'} =~ /mysql/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i || $key =~ /mysql_fast/i) {
$tot{$key}{'color'} = $lblue;
} else {
$tot{$key}{'color'} = $blue;
......
......@@ -1593,7 +1593,7 @@ double Field_longlong::val_real(void)
else
#endif
longlongget(j,ptr);
return unsigned_flag ? ulonglong2double(j) : (double) j;
return unsigned_flag ? ulonglong2double((ulonglong) j) : (double) j;
}
longlong Field_longlong::val_int(void)
......
......@@ -875,8 +875,19 @@ bool Item_sum_count_distinct::setup(THD *thd)
List<Item> list;
/* Create a table with an unique key over all parameters */
for (uint i=0; i < arg_count ; i++)
if (list.push_back(args[i]))
return 1;
{
Item *item=args[i];
if (list.push_back(item))
return 1; // End of memory
if (item->const_item())
{
(void) item->val_int();
if (item->null_value)
always_null=1;
}
}
if (always_null)
return 0;
count_field_types(tmp_table_param,list,0);
if (table)
{
......@@ -978,20 +989,22 @@ int Item_sum_count_distinct::tree_to_myisam()
void Item_sum_count_distinct::reset()
{
if(use_tree)
if (use_tree)
reset_tree(&tree);
else
{
table->file->extra(HA_EXTRA_NO_CACHE);
table->file->delete_all_rows();
table->file->extra(HA_EXTRA_WRITE_CACHE);
}
else if (table)
{
table->file->extra(HA_EXTRA_NO_CACHE);
table->file->delete_all_rows();
table->file->extra(HA_EXTRA_WRITE_CACHE);
}
(void) add();
}
bool Item_sum_count_distinct::add()
{
int error;
if (always_null)
return 0;
copy_fields(tmp_table_param);
copy_funcs(tmp_table_param->funcs);
......
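The always_null short-circuit added above reflects the SQL rule that COUNT(DISTINCT ...) counts only tuples in which no argument is NULL; when one argument is a constant NULL the result is therefore known to be 0 before any rows are read. A minimal illustration with a hypothetical table, matching the new func_group test earlier in this diff:
CREATE TABLE t (x INT);
INSERT INTO t VALUES (1),(2);
SELECT COUNT(DISTINCT x, NULL) FROM t;  -- 0: every (x, NULL) tuple contains a NULL
DROP TABLE t;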
......@@ -148,20 +148,21 @@ class Item_sum_count_distinct :public Item_sum_int
bool fix_fields(THD *thd,TABLE_LIST *tables);
TMP_TABLE_PARAM *tmp_table_param;
TREE tree;
// calculated based on max_heap_table_size. If reached,
// walk the tree and dump it into MyISAM table
uint max_elements_in_tree;
// the first few bytes of record ( at least one)
// are just markers for deleted and NULLs. We want to skip them since
// they will just bloat the tree without providing any valuable info
int rec_offset;
// If there are no blobs, we can use a tree, which
// is faster than heap table. In that case, we still use the table
// to help get things set up, but we insert nothing in it
bool use_tree;
// the first few bytes of record ( at least one)
// are just markers for deleted and NULLs. We want to skip them since
// they will just bloat the tree without providing any valuable info
int rec_offset;
bool always_null; // Set to 1 if the result is always NULL
int tree_to_myisam();
......@@ -172,7 +173,7 @@ class Item_sum_count_distinct :public Item_sum_int
public:
Item_sum_count_distinct(List<Item> &list)
:Item_sum_int(list),table(0),used_table_cache(~(table_map) 0),
tmp_table_param(0),use_tree(0)
tmp_table_param(0),always_null(0),use_tree(0)
{ quick_group=0; }
~Item_sum_count_distinct();
table_map used_tables() const { return used_table_cache; }
......
......@@ -550,7 +550,7 @@ extern ulong keybuff_size,sortbuff_size,max_item_sort_length,table_cache_size,
binlog_cache_size, max_binlog_cache_size;
extern ulong specialflag, current_pid;
extern bool low_priority_updates, using_update_log;
extern bool opt_sql_bin_update, opt_safe_show_db;
extern bool opt_sql_bin_update, opt_safe_show_db, opt_warnings;
extern char language[LIBLEN],reg_ext[FN_EXTLEN],blob_newline;
extern const char **errmesg; /* Error messages */
extern const char *default_tx_isolation_name;
......
......@@ -108,7 +108,7 @@ static void die(const char* fmt, ...)
static void print_version()
{
printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
......@@ -132,7 +132,7 @@ the mysql command line client\n\n");
-s, --short-form Just show the queries, no extra info\n\
-o, --offset=N Skip the first N entries\n\
-h, --host=server Get the binlog from server\n\
-P, --port=port Use port to connect to the remove server\n\
-P, --port=port Use port to connect to the remote server\n\
-u, --user=username Connect to the remove server as username\n\
-p, --password=password Password to connect to remote server\n\
-r, --result-file=file Direct output to a given file\n\
......
......@@ -220,7 +220,7 @@ static char mysql_home[FN_REFLEN],pidfile_name[FN_REFLEN];
static pthread_t select_thread;
static bool opt_log,opt_update_log,opt_bin_log,opt_slow_log,opt_noacl,
opt_disable_networking=0, opt_bootstrap=0,opt_skip_show_db=0,
opt_ansi_mode=0,opt_myisam_log=0,
opt_ansi_mode=0,opt_myisam_log=0,
opt_large_files=sizeof(my_off_t) > 4;
bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0,
opt_show_slave_auth_info = 0;
......@@ -289,7 +289,7 @@ ulong max_tmp_tables,max_heap_table_size;
ulong bytes_sent = 0L, bytes_received = 0L;
bool opt_endinfo,using_udf_functions,low_priority_updates, locked_in_memory;
bool opt_using_transactions, using_update_log;
bool opt_using_transactions, using_update_log, opt_warnings=0;
bool volatile abort_loop,select_thread_in_use,grant_option;
bool volatile ready_to_exit,shutdown_in_progress;
ulong refresh_version=1L,flush_version=1L; /* Increments on each reload */
......@@ -1205,7 +1205,7 @@ Some pointers may be invalid and cause the dump to abort...\n");
fprintf(stderr, "\n
Successfully dumped variables, if you ran with --log, take a look at the\n\
details of what thread %ld did to cause the crash. In some cases of really\n\
bad corruption, the above values may be invalid\n\n",
bad corruption, the values shown above may be invalid\n\n",
thd->thread_id);
}
fprintf(stderr, "\
......@@ -2479,7 +2479,7 @@ enum options {
OPT_INNODB_LOG_ARCH_DIR,
OPT_INNODB_LOG_ARCHIVE,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
OPT_INNODB_UNIX_FILE_FLUSH_METHOD,
OPT_innodb_flush_method,
OPT_SAFE_SHOW_DB,
OPT_GEMINI_SKIP, OPT_INNODB_SKIP,
OPT_TEMP_POOL, OPT_DO_PSTACK, OPT_TX_ISOLATION,
......@@ -2544,7 +2544,7 @@ static struct option long_options[] = {
OPT_INNODB_LOG_ARCHIVE},
{"innodb_flush_log_at_trx_commit", optional_argument, 0,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT},
{"innodb_unix_file_flush_method", required_argument, 0,
{"innodb_flush_method", required_argument, 0,
OPT_INNODB_UNIX_FILE_FLUSH_METHOD},
#endif
{"help", no_argument, 0, '?'},
......@@ -2636,6 +2636,7 @@ static struct option long_options[] = {
{"use-locking", no_argument, 0, (int) OPT_USE_LOCKING},
{"user", required_argument, 0, 'u'},
{"version", no_argument, 0, 'V'},
{"warnings", no_argument, 0, 'W'},
{0, 0, 0, 0}
};
......@@ -3050,6 +3051,8 @@ static void usage(void)
-O, --set-variable var=option\n\
Give a variable an value. --help lists variables\n\
--safe-mode Skip some optimize stages (for testing)\n\
--safe-show-database Don't show databases for which the user has no\n\
privileges\n\
--skip-concurrent-insert\n\
Don't use concurrent insert with MyISAM\n\
--skip-delay-key-write\n\
......@@ -3076,7 +3079,8 @@ static void usage(void)
Default transaction isolation level\n\
--temp-pool Use a pool of temporary files\n\
-u, --user=user_name Run mysqld daemon as user\n\
-V, --version output version information and exit");
-V, --version output version information and exit\n\
-W, --warnings Log some not critical warnings to the log file\n");
#ifdef __WIN__
puts("NT and Win32 specific options:\n\
--console Don't remove the console window\n\
......@@ -3113,7 +3117,7 @@ static void usage(void)
puts("\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
--innodb_flush_method=# Which method to flush data\n\
--innodb_flush_method=# With which method to flush data\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
......@@ -3209,7 +3213,7 @@ static void get_options(int argc,char **argv)
myisam_delay_key_write=1; // Allow use of this
my_use_symdir=1; // Use internal symbolic links
while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvI?",
while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvWI?",
long_options, &option_index)) != EOF)
{
switch(c) {
......@@ -3219,6 +3223,9 @@ static void get_options(int argc,char **argv)
#endif
opt_endinfo=1; /* unireg: memory allocation */
break;
case 'W':
opt_warnings=1;
break;
case 'a':
opt_ansi_mode=1;
thd_startup_options|=OPTION_ANSI_MODE;
......
......@@ -504,11 +504,12 @@ void close_temporary(TABLE *table,bool delete_table)
void close_temporary_tables(THD *thd)
{
TABLE *table,*next;
uint init_query_buf_size = 11, query_buf_size; // "drop table "
char* query, *p;
char *query, *end;
const uint init_query_buf_size = 11; // "drop table "
uint query_buf_size;
bool found_user_tables = 0;
LINT_INIT(p);
LINT_INIT(end);
query_buf_size = init_query_buf_size;
for (table=thd->temporary_tables ; table ; table=table->next)
......@@ -516,37 +517,37 @@ void close_temporary_tables(THD *thd)
query_buf_size += table->key_length;
}
if(query_buf_size == init_query_buf_size)
if (query_buf_size == init_query_buf_size)
return; // no tables to close
if((query = alloc_root(&thd->mem_root, query_buf_size)))
{
memcpy(query, "drop table ", init_query_buf_size);
p = query + init_query_buf_size;
}
if ((query = alloc_root(&thd->mem_root, query_buf_size)))
{
memcpy(query, "drop table ", init_query_buf_size);
end = query + init_query_buf_size;
}
for (table=thd->temporary_tables ; table ; table=next)
{
if(query) // we might be out of memory, but this is not fatal
if (query) // we might be out of memory, but this is not fatal
{
// skip temporary tables not created directly by the user
if (table->table_name[0] != '#')
{
// skip temporary tables not created directly by the user
if(table->table_name[0] != '#')
{
p = strxmov(p,table->table_cache_key,".",
table->table_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
}
end = strxmov(end,table->table_cache_key,".",
table->table_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
}
}
next=table->next;
close_temporary(table);
}
if (query && found_user_tables && mysql_bin_log.is_open())
{
uint save_query_len = thd->query_length;
*--p = 0;
thd->query_length = (uint)(p-query);
*--end = 0; // Remove last ','
thd->query_length = (uint)(end-query);
Query_log_event qinfo(thd, query);
mysql_bin_log.write(&qinfo);
thd->query_length = save_query_len;
......
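For context, the statement assembled above and written to the binary log has the form shown below; the database and table names are placeholders, and the trailing ',' appended by strxmov is the one overwritten by *--end = 0:
drop table mydb.tmp_t1,mydb.tmp_t2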
......@@ -556,6 +556,7 @@ pthread_handler_decl(handle_one_connection,arg)
free_root(&thd->mem_root,MYF(0));
if (net->error && net->vio != 0)
{
if (!thd->killed && ! opt_warnings)
sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
thd->thread_id,(thd->db ? thd->db : "unconnected"),
thd->user ? thd->user : "unauthenticated",
......
......@@ -403,7 +403,22 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
goto err; /* purecov: inspected */
}
if (join.const_tables && !thd->locked_tables)
{
TABLE **table, **end;
for (table=join.table, end=table + join.const_tables ;
table != end;
table++)
{
/* BDB tables require that we call index_end() before doing an unlock */
if ((*table)->key_read)
{
(*table)->key_read=0;
(*table)->file->extra(HA_EXTRA_NO_KEYREAD);
}
(*table)->file->index_end();
}
mysql_unlock_some_tables(thd, join.table,join.const_tables);
}
if (!conds && join.outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
......@@ -2810,7 +2825,12 @@ return_zero_rows(select_result *result,TABLE_LIST *tables,List<Item> &fields,
if (send_row)
result->send_data(fields);
if (tables) // Not from do_select()
{
/* Close open cursors */
for (TABLE_LIST *table=tables; table ; table=table->next)
table->table->file->index_end();
result->send_eof(); // Should be safe
}
}
DBUG_RETURN(0);
}
......
......@@ -844,18 +844,22 @@ store_create_info(THD *thd, TABLE *table, String *packet)
for (uint i=0 ; i < table->keys ; i++,key_info++)
{
KEY_PART_INFO *key_part= key_info->key_part;
bool found_primary=0;
packet->append(",\n ", 4);
KEY_PART_INFO *key_part= key_info->key_part;
if (i == primary_key)
if (i == primary_key && !strcmp(key_info->name,"PRIMARY"))
{
found_primary=1;
packet->append("PRIMARY ", 8);
}
else if (key_info->flags & HA_NOSAME)
packet->append("UNIQUE ", 7);
else if (key_info->flags & HA_FULLTEXT)
packet->append("FULLTEXT ", 9);
packet->append("KEY ", 4);
if (i != primary_key)
if (!found_primary)
append_identifier(thd,packet,key_info->name);
packet->append(" (", 2);
......
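The found_primary change above is what the new show_check test exercises: a UNIQUE key that merely serves as the first candidate key is no longer printed as PRIMARY KEY. A condensed illustration, with table names chosen only for this sketch:
CREATE TABLE t1 (a INT NOT NULL, UNIQUE aa (a));
SHOW CREATE TABLE t1;   -- now prints: UNIQUE KEY `aa` (`a`)
CREATE TABLE t2 (a INT NOT NULL, PRIMARY KEY (a));
SHOW CREATE TABLE t2;   -- prints: PRIMARY KEY (`a`)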
......@@ -455,8 +455,8 @@ str_to_TIME(const char *str, uint length, TIME *l_time,bool fuzzy_date)
if ((date[i]=tmp_value))
date_used=1; // Found something
if (i == 2 && str != end && *str == 'T')
str++; // ISO8601: CCYYMMDDThhmmss
else
str++; // ISO8601: CCYYMMDDThhmmss
else if ( i != 5 ) // Skip inter-field delimiters
{
while (str != end && (ispunct(*str) || isspace(*str)))
{
......