Commit 1e4ccec8
authored Jul 04, 2006 by tomas@poseidon.ndb.mysql.com
Merge poseidon.ndb.mysql.com:/home/tomas/mysql-5.0
into poseidon.ndb.mysql.com:/home/tomas/mysql-5.0-release
parents b3e368df 98874725
Showing 3 changed files with 81 additions and 48 deletions
sql/ha_ndbcluster.cc  +58 -45
sql/handler.h          +1  -0
sql/sql_update.cc     +22  -3
sql/ha_ndbcluster.cc (view file @ 1e4ccec8)
@@ -363,6 +363,7 @@ void ha_ndbcluster::records_update()
   {
     Ndb *ndb= get_ndb();
     struct Ndb_statistics stat;
     ndb->setDatabaseName(m_dbname);
     if (ndb_get_table_statistics(ndb, m_tabname, &stat) == 0){
       mean_rec_length= stat.row_size;
       data_file_length= stat.fragment_memory;
@@ -3081,6 +3082,7 @@ void ha_ndbcluster::info(uint flag)
       DBUG_VOID_RETURN;
     Ndb *ndb= get_ndb();
     struct Ndb_statistics stat;
     ndb->setDatabaseName(m_dbname);
     if (current_thd->variables.ndb_use_exact_count &&
         ndb_get_table_statistics(ndb, m_tabname, &stat) == 0)
     {
@@ -4578,7 +4580,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
                 HA_NO_PREFIX_CHAR_KEYS |
                 HA_NEED_READ_RANGE_BUFFER |
                 HA_CAN_GEOMETRY |
-                HA_CAN_BIT_FIELD),
+                HA_CAN_BIT_FIELD |
+                HA_PARTIAL_COLUMN_READ),
   m_share(0),
   m_use_write(FALSE),
   m_ignore_dup_key(FALSE),
@@ -5842,62 +5845,60 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
   DBUG_ENTER("ndb_get_table_statistics");
   DBUG_PRINT("enter", ("table: %s", table));
   NdbTransaction* pTrans;
+  NdbError error;
   int retries= 10;
   int retry_sleep= 30 * 1000; /* 30 milliseconds */

   do
   {
-    pTrans= ndb->startTransaction();
-    if (pTrans == NULL)
-    {
-      if (ndb->getNdbError().status == NdbError::TemporaryError &&
-          retries--)
-      {
-        my_sleep(retry_sleep);
-        continue;
-      }
-      break;
-    }
-    NdbScanOperation* pOp= pTrans->getNdbScanOperation(table);
-    if (pOp == NULL)
-      break;
-    if (pOp->readTuples(NdbOperation::LM_CommittedRead))
-      break;
-    int check= pOp->interpret_exit_last_row();
-    if (check == -1)
-      break;
-    Uint64 rows, commits, mem;
-    Uint32 size;
+    Uint64 rows, commits, mem;
+    Uint32 size;
+    Uint32 count= 0;
+    Uint64 sum_rows= 0;
+    Uint64 sum_commits= 0;
+    Uint64 sum_row_size= 0;
+    Uint64 sum_mem= 0;
+    NdbScanOperation* pOp;
+    NdbResultSet* rs;
+    int check;
+
+    if ((pTrans= ndb->startTransaction()) == NULL)
+    {
+      error= ndb->getNdbError();
+      goto retry;
+    }
+    if ((pOp= pTrans->getNdbScanOperation(table)) == NULL)
+    {
+      error= pTrans->getNdbError();
+      goto retry;
+    }
+    if (pOp->readTuples(NdbOperation::LM_CommittedRead))
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }
+    if (pOp->interpret_exit_last_row() == -1)
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }
     pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
     pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
     pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
     pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
-    check= pTrans->execute(NdbTransaction::NoCommit,
-                           NdbTransaction::AbortOnError,
-                           TRUE);
-    if (check == -1)
+    if (pTrans->execute(NdbTransaction::NoCommit,
+                        NdbTransaction::AbortOnError,
+                        TRUE) == -1)
     {
-      if (pTrans->getNdbError().status == NdbError::TemporaryError &&
-          retries--)
-      {
-        ndb->closeTransaction(pTrans);
-        pTrans= 0;
-        my_sleep(retry_sleep);
-        continue;
-      }
-      break;
+      error= pTrans->getNdbError();
+      goto retry;
     }
-    Uint32 count= 0;
-    Uint64 sum_rows= 0;
-    Uint64 sum_commits= 0;
-    Uint64 sum_row_size= 0;
-    Uint64 sum_mem= 0;
     while ((check= pOp->nextResult(TRUE, TRUE)) == 0)
     {
       sum_rows+= rows;
@@ -5909,7 +5910,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     }
     if (check == -1)
-      break;
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }

     pOp->close(TRUE);
@@ -5926,12 +5930,21 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
                         sum_mem, count));

     DBUG_RETURN(0);
-  } while(1);

-  if (pTrans)
-    ndb->closeTransaction(pTrans);
-  DBUG_PRINT("exit", ("failed"));
-  DBUG_RETURN(-1);
+retry:
+    if (pTrans)
+    {
+      ndb->closeTransaction(pTrans);
+      pTrans= NULL;
+    }
+    if (error.status == NdbError::TemporaryError &&
+        retries--)
+    {
+      my_sleep(retry_sleep);
+      continue;
+    }
+    break;
+  } while(1);
+
+  DBUG_PRINT("exit", ("failed, error %u(%s)",
+                      error.code, error.message));
+  ERR_RETURN(error);
 }

 /*
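The reworked ndb_get_table_statistics() above replaces the scattered break statements with one pattern: capture the NdbError at the failing step, jump to a single retry label that closes any open transaction, and loop again only while the error is temporary and the retry budget (10 attempts, 30 ms apart) lasts; otherwise the last error is reported. Below is a minimal standalone sketch of that control flow; Error, do_scan(), fetch_stats() and the simulated transient failure are illustrative stand-ins, not the NDB API.

#include <cstdio>

/* Illustrative stand-ins: a real caller would hold an NdbError and an open
   NdbTransaction; here a small struct and a counter model a transient fault. */
struct Error { bool temporary; int code; };

static int transient_failures_left= 2;     /* first two attempts fail */

static bool do_scan(Error *err)            /* hypothetical work step */
{
  if (transient_failures_left-- > 0)
  {
    err->temporary= true;                  /* like NdbError::TemporaryError */
    err->code= 4010;
    return false;
  }
  return true;
}

/* Same shape as the patched function: one retry label, bounded retries,
   a sleep between attempts, and the last error reported on final failure. */
static int fetch_stats()
{
  Error error= { false, 0 };
  int retries= 10;

  do
  {
    if (!do_scan(&error))
      goto retry;

    printf("success\n");
    return 0;

retry:
    /* a real implementation closes the open transaction here */
    if (error.temporary && retries--)
    {
      /* my_sleep(30 * 1000);  30 ms between attempts in the patch */
      continue;
    }
    break;
  } while (1);

  printf("failed, error %d\n", error.code);
  return -1;
}

int main() { return fetch_stats() == 0 ? 0 : 1; }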
sql/handler.h (view file @ 1e4ccec8)
@@ -57,6 +57,7 @@
    see mi_rsame/heap_rsame/myrg_rsame
 */
 #define HA_READ_RND_SAME       (1 << 0)
+#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */
 #define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
 #define HA_REC_NOT_IN_SEQ      (1 << 3) /* ha_info don't return recnumber;
                                            It returns a position to ha_r_rnd */
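Handler capabilities such as the new HA_PARTIAL_COLUMN_READ are single bits in the value returned by handler::table_flags(): a storage engine ORs the bits it supports into that mask (as the ha_ndbcluster constructor hunk above does), and callers test individual bits. A reduced, self-contained sketch of that convention follows; the Handler struct is a hypothetical stand-in for the real handler class.

#include <cstdio>

/* Bit-flag capability mask, mirroring the handler.h convention. */
#define HA_READ_RND_SAME        (1 << 0)
#define HA_PARTIAL_COLUMN_READ  (1 << 1)  /* read may not return all columns */
#define HA_TABLE_SCAN_ON_INDEX  (1 << 2)

/* Hypothetical stand-in for the handler class hierarchy. */
struct Handler
{
  unsigned long flags;
  unsigned long table_flags() const { return flags; }
};

int main()
{
  /* An engine that, like ha_ndbcluster after this change, may return rows
     with only some columns filled in advertises HA_PARTIAL_COLUMN_READ. */
  Handler ndb_like=    { HA_READ_RND_SAME | HA_PARTIAL_COLUMN_READ };
  Handler myisam_like= { HA_READ_RND_SAME };

  /* Callers test the bit, as sql_update.cc now does:
     can_compare_record= !(table->file->table_flags() & HA_PARTIAL_COLUMN_READ); */
  printf("ndb-like:    can_compare_record=%d\n",
         (int)!(ndb_like.table_flags() & HA_PARTIAL_COLUMN_READ));
  printf("myisam-like: can_compare_record=%d\n",
         (int)!(myisam_like.table_flags() & HA_PARTIAL_COLUMN_READ));
  return 0;
}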
sql/sql_update.cc (view file @ 1e4ccec8)
@@ -120,6 +120,7 @@ int mysql_update(THD *thd,
   bool          using_limit= limit != HA_POS_ERROR;
   bool          safe_update= thd->options & OPTION_SAFE_UPDATES;
   bool          used_key_is_modified, transactional_table;
+  bool          can_compare_record;
   int           res;
   int           error;
   uint          used_index= MAX_KEY;
@@ -433,6 +434,13 @@ int mysql_update(THD *thd,
                               (MODE_STRICT_TRANS_TABLES |
                                MODE_STRICT_ALL_TABLES)));

+  /*
+    We can use compare_record() to optimize away updates if
+    the table handler is returning all columns
+  */
+  can_compare_record= !(table->file->table_flags() &
+                        HA_PARTIAL_COLUMN_READ);
+
   while (!(error=info.read_record(&info)) && !thd->killed)
   {
     if (!(select && select->skip_record()))
@@ -445,7 +453,7 @@ int mysql_update(THD *thd,
       found++;
-      if (compare_record(table, query_id))
+      if (!can_compare_record || compare_record(table, query_id))
       {
         if ((res= table_list->view_check_option(thd, ignore)) !=
             VIEW_CHECK_OK)
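This hunk, and the multi_update hunks below, apply the same guard: compare_record() can only prove a row unchanged when the engine returned every column, so the skip-identical-rows optimization is bypassed whenever HA_PARTIAL_COLUMN_READ is set. A compact standalone sketch of that decision follows; rows_identical() and must_write_row() are hypothetical stand-ins for the real compare_record() and update path.

#include <cstdio>
#include <cstring>

#define HA_PARTIAL_COLUMN_READ (1 << 1)

/* Stand-in for compare_record(): here, "identical" means equal row images. */
static bool rows_identical(const char *before, const char *after)
{
  return strcmp(before, after) == 0;
}

/* Mirrors the patched logic: write the row unless we are both allowed to
   compare (all columns were read) and the comparison shows no change. */
static bool must_write_row(unsigned long table_flags,
                           const char *before, const char *after)
{
  bool can_compare_record= !(table_flags & HA_PARTIAL_COLUMN_READ);
  return !can_compare_record || !rows_identical(before, after);
}

int main()
{
  /* Full-column engine, unchanged row: the update is optimized away. */
  printf("%d\n", must_write_row(0, "alice,30", "alice,30"));
  /* Partial-column engine: the comparison is unreliable, so always write. */
  printf("%d\n", must_write_row(HA_PARTIAL_COLUMN_READ, "alice,30", "alice,30"));
  /* Genuinely changed row: written either way. */
  printf("%d\n", must_write_row(0, "alice,30", "alice,31"));
  return 0;
}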
@@ -1248,8 +1256,15 @@ bool multi_update::send_data(List<Item> &not_used_values)
     uint offset= cur_table->shared;
     table->file->position(table->record[0]);
+    /*
+      We can use compare_record() to optimize away updates if
+      the table handler is returning all columns
+    */
     if (table == table_to_update)
     {
+      bool can_compare_record;
+      can_compare_record= !(table->file->table_flags() &
+                            HA_PARTIAL_COLUMN_READ);
       table->status|= STATUS_UPDATED;
       store_record(table,record[1]);
       if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1259,7 +1274,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
         DBUG_RETURN(1);
       found++;
-      if (compare_record(table, thd->query_id))
+      if (!can_compare_record || compare_record(table, thd->query_id))
       {
         int error;
         if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1376,6 +1391,7 @@ int multi_update::do_updates(bool from_send_error)
   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
   {
     byte *ref_pos;
+    bool can_compare_record;

     table = cur_table->table;
     if (table == table_to_update)
@@ -1402,6 +1418,9 @@ int multi_update::do_updates(bool from_send_error)
     if ((local_error = tmp_table->file->ha_rnd_init(1)))
       goto err;

+    can_compare_record= !(table->file->table_flags() &
+                          HA_PARTIAL_COLUMN_READ);
+
     ref_pos= (byte*) tmp_table->field[0]->ptr;
     for (;;)
     {
@@ -1431,7 +1450,7 @@ int multi_update::do_updates(bool from_send_error)
                                             TRG_ACTION_BEFORE, TRUE))
         goto err2;
-      if (compare_record(table, thd->query_id))
+      if (!can_compare_record || compare_record(table, thd->query_id))
       {
         if ((local_error=table->file->update_row(table->record[1],
                                                  table->record[0])))