Commit 210d0550
authored Mar 23, 2007 by tomas@poseidon.mysql.com

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0-ndb
into poseidon.mysql.com:/home/tomas/mysql-5.0-ndb

parents cb4d64ad 2ff03a22
Showing 17 changed files with 1280 additions and 149 deletions (+1280, -149)
mysql-test/include/ndb_backup.inc            +24   -0
mysql-test/include/ndb_backup_print.inc       +6   -0
mysql-test/r/ndb_restore_print.result       +321   -0
mysql-test/r/ndb_single_user.result           +9   -9
mysql-test/t/ndb_restore_print.test         +189   -0
ndb/include/ndbapi/NdbRecAttr.hpp            +50   -0
ndb/include/util/OutputStream.hpp             +2   -1
ndb/src/kernel/blocks/dbtc/Dbtc.hpp           +8   -3
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp     +118  -15
ndb/src/ndbapi/ClusterMgr.cpp                 +1   -1
ndb/src/ndbapi/Ndb.cpp                       +11   -0
ndb/src/ndbapi/NdbRecAttr.cpp               +118  -26
ndb/src/ndbapi/TransporterFacade.hpp          +5  -11
ndb/tools/restore/Restore.cpp                +54   -6
ndb/tools/restore/Restore.hpp                +16   -2
ndb/tools/restore/consumer_printer.cpp       +17   -5
ndb/tools/restore/restore_main.cpp          +331  -70
mysql-test/include/ndb_backup.inc
0 → 100644
######################################################
# By JBM 2006-02-16 So that the code is not repeated #
# in test cases and can be reused.                   #
######################################################

--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT

# there is no neat way to find the backupid, this is a hack to find it...
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat

CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;

DELETE FROM test.backup_info;

LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';

--replace_column 1 <the_backup_id>

SELECT @the_backup_id:=backup_id FROM test.backup_info;

let the_backup_id=`select @the_backup_id`;

DROP TABLE test.backup_info;
mysql-test/include/ndb_backup_print.inc
0 → 100644
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 1 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter > $MYSQLTEST_VARDIR/tmp/tmp.dat
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 2 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter >> $MYSQLTEST_VARDIR/tmp/tmp.dat
--exec sort $MYSQLTEST_VARDIR/tmp/tmp.dat
--exec rm -f $MYSQLTEST_VARDIR/tmp/tmp.dat
--let ndb_restore_opts=
--let ndb_restore_filter=
mysql-test/r/ndb_restore_print.result
0 → 100644
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
create table t1
(pk int key
,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64)
,b1 TINYINT, b2 TINYINT UNSIGNED
,c1 SMALLINT, c2 SMALLINT UNSIGNED
,d1 INT, d2 INT UNSIGNED
,e1 BIGINT, e2 BIGINT UNSIGNED
,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255)
,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
) engine myisam;
insert into t1 values
(1
,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001
,127, 255
,32767, 65535
,2147483647, 4294967295
,9223372036854775807, 18446744073709551615
,'1','12345678901234567890123456789012','123456789'
,'1','12345678901234567890123456789012','123456789'
,0x12,0x123456789abcdef0, 0x012345
,0x12,0x123456789abcdef0, 0x00123450
);
insert into t1 values
(2
,0, 0, 0, 0, 0
,-128, 0
,-32768, 0
,-2147483648, 0
,-9223372036854775808, 0
,'','',''
,'','',''
,0x0,0x0,0x0
,0x0,0x0,0x0
);
insert into t1 values
(3
,NULL,NULL,NULL,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
,NULL,NULL,NULL
);
select pk
,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5)
,b1, b2
,c1 , c2
,d1 , d2
,e1 , e2
,f1 , f2, f3
,g1 , g2, g3
,hex(h1), hex(h2), hex(h3)
,hex(i1), hex(i2), hex(i3)
from t1 order by pk;
pk 1
hex(a1) 1
hex(a2) 17
hex(a3) 789A
hex(a4) 789ABCDE
hex(a5) FEDC0001
b1 127
b2 255
c1 32767
c2 65535
d1 2147483647
d2 4294967295
e1 9223372036854775807
e2 18446744073709551615
f1 1
f2 12345678901234567890123456789012
f3 123456789
g1 1
g2 12345678901234567890123456789012
g3 123456789
hex(h1) 12
hex(h2) 123456789ABCDEF0
hex(h3) 012345000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
hex(i1) 12
hex(i2) 123456789ABCDEF0
hex(i3) 00123450
pk 2
hex(a1) 0
hex(a2) 0
hex(a3) 0
hex(a4) 0
hex(a5) 0
b1 -128
b2 0
c1 -32768
c2 0
d1 -2147483648
d2 0
e1 -9223372036854775808
e2 0
f1
f2
f3
g1
g2
g3
hex(h1) 00
hex(h2) 0000000000000000
hex(h3) 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
hex(i1) 00
hex(i2) 00
hex(i3) 00
pk 3
hex(a1) NULL
hex(a2) NULL
hex(a3) NULL
hex(a4) NULL
hex(a5) NULL
b1 NULL
b2 NULL
c1 NULL
c2 NULL
d1 NULL
d2 NULL
e1 NULL
e2 NULL
f1 NULL
f2 NULL
f3 NULL
g1 NULL
g2 NULL
g3 NULL
hex(h1) NULL
hex(h2) NULL
hex(h3) NULL
hex(i1) NULL
hex(i2) NULL
hex(i3) NULL
alter table t1 engine ndb;
select pk
,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5)
,b1, b2
,c1 , c2
,d1 , d2
,e1 , e2
,f1 , f2, f3
,g1 , g2, g3
,hex(h1), hex(h2), hex(h3)
,hex(i1), hex(i2), hex(i3)
from t1 order by pk;
pk 1
hex(a1) 1
hex(a2) 17
hex(a3) 789A
hex(a4) 789ABCDE
hex(a5) FEDC0001
b1 127
b2 255
c1 32767
c2 65535
d1 2147483647
d2 4294967295
e1 9223372036854775807
e2 18446744073709551615
f1 1
f2 12345678901234567890123456789012
f3 123456789
g1 1
g2 12345678901234567890123456789012
g3 123456789
hex(h1) 12
hex(h2) 123456789ABCDEF0
hex(h3) 012345000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
hex(i1) 12
hex(i2) 123456789ABCDEF0
hex(i3) 00123450
pk 2
hex(a1) 0
hex(a2) 0
hex(a3) 0
hex(a4) 0
hex(a5) 0
b1 -128
b2 0
c1 -32768
c2 0
d1 -2147483648
d2 0
e1 -9223372036854775808
e2 0
f1
f2
f3
g1
g2
g3
hex(h1) 00
hex(h2) 0000000000000000
hex(h3) 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
hex(i1) 00
hex(i2) 00
hex(i3) 00
pk 3
hex(a1) NULL
hex(a2) NULL
hex(a3) NULL
hex(a4) NULL
hex(a5) NULL
b1 NULL
b2 NULL
c1 NULL
c2 NULL
d1 NULL
d2 NULL
e1 NULL
e2 NULL
f1 NULL
f2 NULL
f3 NULL
g1 NULL
g2 NULL
g3 NULL
hex(h1) NULL
hex(h2) NULL
hex(h3) NULL
hex(i1) NULL
hex(i2) NULL
hex(i3) NULL
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
1;0x1;0x17;0x789A;0x789ABCDE;0xFEDC0001;127;255;32767;65535;2147483647;4294967295;9223372036854775807;18446744073709551615;1;12345678901234567890123456789012;123456789;1;12345678901234567890123456789012;123456789;0x12;0x123456789ABCDEF0;0x012345;0x12;0x123456789ABCDEF0;0x00123450
2;0x0;0x0;0x0;0x0;0x0;-128;0;-32768;0;-2147483648;0;-9223372036854775808;0;;;;;;;0x0;0x0;0x0;0x0;0x0;0x0
3;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N;\N
1,0x1,0x17,0x789A,0x789ABCDE,0xFEDC0001,127,255,32767,65535,2147483647,4294967295,9223372036854775807,18446744073709551615,'1','12345678901234567890123456789012','123456789','1','12345678901234567890123456789012','123456789',0x12,0x123456789ABCDEF0,0x012345,0x12,0x123456789ABCDEF0,0x00123450
2,0x0,0x0,0x0,0x0,0x0,-128,0,-32768,0,-2147483648,0,-9223372036854775808,0,'','','','','','',0x0,0x0,0x0,0x0,0x0,0x0
3,,,,,,,,,,,,,,,,,,,,,,,,,
drop table t1;
create table t1
(pk int key
,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
,h1 BINARY(1), h2 BINARY(9), h3 BINARY(255)
,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
) engine ndb;
insert into t1 values
(1
,'1','12345678901234567890123456789012','123456789 '
,'1 ','12345678901234567890123456789012 ','123456789 '
,0x20,0x123456789abcdef020, 0x012345000020
,0x1200000020,0x123456789abcdef000000020, 0x00123450000020
);
create table t2 (pk int key, a int) engine ndb;
create table t3 (pk int key, a int) engine ndb;
create table t4 (pk int key, a int) engine ndb;
insert into t2 values (1,11),(2,12),(3,13),(4,14),(5,15);
insert into t3 values (1,21),(2,22),(3,23),(4,24),(5,25);
insert into t4 values (1,31),(2,32),(3,33),(4,34),(5,35);
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
'1' '1' '12345678901234567890123456789012' '123456789' '1' '12345678901234567890123456789012' '123456789' '0x20' '0x123456789ABCDEF020' '0x012345000020' '0x1200000020' '0x123456789ABCDEF000000020' '0x00123450000020'
t1
--
1 1 12345678901234567890123456789012 123456789 1 12345678901234567890123456789012 123456789 0x20 0x123456789ABCDEF020 0x012345000020 0x1200000020 0x123456789ABCDEF000000020 0x00123450000020
t2
--
1 11
2 12
3 13
4 14
5 15
t3
--
1 21
2 22
3 23
4 24
5 25
t4
--
1 31
2 32
3 33
4 34
5 35
drop table t1;
create table t1
(pk int key
,a1 MEDIUMINT, a2 MEDIUMINT UNSIGNED
) engine ndb;
insert into t1 values(1, 8388607, 16777215);
insert into t1 values(2, -8388608, 0);
insert into t1 values(3, -1, 1);
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
1;8388607;16777215
2;-8388608;0
3;-1;1
drop table t1;
drop table t2;
drop table t3;
drop table t4;
mysql-test/r/ndb_single_user.result
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
create table t1 (a int key, b int unique, c int) engine ndb;
ERROR HY000: Can't create table './test/t1.frm' (errno: 155)
ERROR HY000: Can't create table './test/t1.frm' (errno: 299)
create table t1 (a int key, b int unique, c int) engine ndb;
insert into t1 values (1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(6,6,0),(7,7,0),(8,8,0),(9,9,0),(10,10,0);
create table t2 as select * from t1;
...
...
@@ -28,19 +28,19 @@ insert into t1 select * from t2;
drop table t1;
ERROR 42S02: Unknown table 't1'
create index new_index on t1 (c);
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
insert into t1 values (1,1,0),(2,2,0),(3,3,0),(4,4,0),(5,5,0),(6,6,0),(7,7,0),(8,8,0),(9,9,0),(10,10,0);
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
select * from t1 where a = 1;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
select * from t1 where b = 4;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
update t1 set b=102 where a = 2;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
update t1 set b=103 where b = 3;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
update t1 set b=b+100;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
update t1 set b=b+100 where a > 7;
ERROR 42S02: Table 'test.t1' doesn't exist
ERROR HY000: Got error 299 'Operation not allowed or aborted due to single user mode' from ndbcluster
drop table t1;
mysql-test/t/ndb_restore_print.test
0 → 100644
--source include/have_ndb.inc
--source include/ndb_default_cluster.inc
--source include/not_embedded.inc

--disable_warnings
use test;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
--enable_warnings

# basic datatypes
create table t1
 (pk int key
 ,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64)
 ,b1 TINYINT, b2 TINYINT UNSIGNED
 ,c1 SMALLINT, c2 SMALLINT UNSIGNED
 ,d1 INT, d2 INT UNSIGNED
 ,e1 BIGINT, e2 BIGINT UNSIGNED
 ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
 ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
 ,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255)
 ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
 ) engine myisam;

# max values
insert into t1 values
 (1
 ,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001
 ,127, 255
 ,32767, 65535
 ,2147483647, 4294967295
 ,9223372036854775807, 18446744073709551615
 ,'1','12345678901234567890123456789012','123456789'
 ,'1','12345678901234567890123456789012','123456789'
 ,0x12,0x123456789abcdef0, 0x012345
 ,0x12,0x123456789abcdef0, 0x00123450
 );

# min values
insert into t1 values
 (2
 ,0, 0, 0, 0, 0
 ,-128, 0
 ,-32768, 0
 ,-2147483648, 0
 ,-9223372036854775808, 0
 ,'','',''
 ,'','',''
 ,0x0,0x0,0x0
 ,0x0,0x0,0x0
 );

# null values
insert into t1 values
 (3
 ,NULL,NULL,NULL,NULL,NULL
 ,NULL,NULL
 ,NULL,NULL
 ,NULL,NULL
 ,NULL,NULL
 ,NULL,NULL,NULL
 ,NULL,NULL,NULL
 ,NULL,NULL,NULL
 ,NULL,NULL,NULL
 );

--vertical_results
select pk
 ,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5)
 ,b1, b2
 ,c1 , c2
 ,d1 , d2
 ,e1 , e2
 ,f1 , f2, f3
 ,g1 , g2, g3
 ,hex(h1), hex(h2), hex(h3)
 ,hex(i1), hex(i2), hex(i3)
 from t1 order by pk;

alter table t1 engine ndb;

select pk
 ,hex(a1), hex(a2), hex(a3), hex(a4), hex(a5)
 ,b1, b2
 ,c1 , c2
 ,d1 , d2
 ,e1 , e2
 ,f1 , f2, f3
 ,g1 , g2, g3
 ,hex(h1), hex(h2), hex(h3)
 ,hex(i1), hex(i2), hex(i3)
 from t1 order by pk;
--horizontal_results

--source include/ndb_backup.inc
--let ndb_restore_filter=test t1
--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by=";"
--source include/ndb_backup_print.inc
--let ndb_restore_filter=test t1
--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by="," --fields-optionally-enclosed-by="'"
--source include/ndb_backup_print.inc

drop table t1;

# some binary char tests with trailing spaces
create table t1
 (pk int key
 ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY
 ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY
 ,h1 BINARY(1), h2 BINARY(9), h3 BINARY(255)
 ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000)
 ) engine ndb;

insert into t1 values
 (1
 ,'1','12345678901234567890123456789012','123456789 '
 ,'1 ','12345678901234567890123456789012 ','123456789 '
 ,0x20,0x123456789abcdef020, 0x012345000020
 ,0x1200000020,0x123456789abcdef000000020, 0x00123450000020
 );

create table t2 (pk int key, a int) engine ndb;
create table t3 (pk int key, a int) engine ndb;
create table t4 (pk int key, a int) engine ndb;
insert into t2 values (1,11),(2,12),(3,13),(4,14),(5,15);
insert into t3 values (1,21),(2,22),(3,23),(4,24),(5,25);
insert into t4 values (1,31),(2,32),(3,33),(4,34),(5,35);

--source include/ndb_backup.inc
--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-enclosed-by="'" --fields-optionally-enclosed-by="X"
--let ndb_restore_filter=test t1
--source include/ndb_backup_print.inc

--exec rm -f $MYSQLTEST_VARDIR/tmp/t1.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t2.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t3.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t4.txt
--let ndb_restore_opts=--verbose=0 --print_data --hex --tab $MYSQLTEST_VARDIR/tmp --append
--let ndb_restore_filter=test
--source include/ndb_backup_print.inc
--let $message= t1
--source include/show_msg.inc
--exec sort $MYSQLTEST_VARDIR/tmp/t1.txt
--let $message= t2
--source include/show_msg.inc
--exec sort $MYSQLTEST_VARDIR/tmp/t2.txt
--let $message= t3
--source include/show_msg.inc
--exec sort $MYSQLTEST_VARDIR/tmp/t3.txt
--let $message= t4
--source include/show_msg.inc
--exec sort $MYSQLTEST_VARDIR/tmp/t4.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t1.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t2.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t3.txt
--exec rm -f $MYSQLTEST_VARDIR/tmp/t4.txt

# now test some other datatypes
drop table t1;
create table t1
 (pk int key
 ,a1 MEDIUMINT, a2 MEDIUMINT UNSIGNED
 ) engine ndb;

# max values
insert into t1 values(1, 8388607, 16777215);
# min values
insert into t1 values(2, -8388608, 0);
# small values
insert into t1 values(3, -1, 1);

# backup and print
--source include/ndb_backup.inc
--let ndb_restore_filter=test t1
--let ndb_restore_opts=--verbose=0 --print_data --hex --fields-terminated-by=";"
--source include/ndb_backup_print.inc

# clean up
drop table t1;
drop table t2;
drop table t3;
drop table t4;
ndb/include/ndbapi/NdbRecAttr.hpp
...
@@ -145,6 +145,13 @@ public:
   */
  Int32 int32_value() const;

  /**
   * Get value stored in NdbRecAttr object.
   *
   * @return Medium value.
   */
  Int32 medium_value() const;

  /**
   * Get value stored in NdbRecAttr object.
   *
...
@@ -173,6 +180,13 @@ public:
   */
  Uint32 u_32_value() const;

  /**
   * Get value stored in NdbRecAttr object.
   *
   * @return Unsigned medium value.
   */
  Uint32 u_medium_value() const;

  /**
   * Get value stored in NdbRecAttr object.
   *
...
@@ -318,6 +332,16 @@ NdbRecAttr::int32_value() const
  return *(Int32*)theRef;
}

inline
Int32
NdbRecAttr::medium_value() const
{
  Uint32 tmp = *(Uint32*)theRef;
  if (tmp & (0x1 << 23))
    tmp |= (0xFF << 24);
  return (Int32)tmp;
}

inline
short
NdbRecAttr::short_value() const
...
@@ -339,6 +363,13 @@ NdbRecAttr::u_32_value() const
  return *(Uint32*)theRef;
}

inline
Uint32
NdbRecAttr::u_medium_value() const
{
  return *(Uint32*)theRef;
}

inline
Uint16
NdbRecAttr::u_short_value() const
...
@@ -441,6 +472,25 @@ NdbRecAttr::isNULL() const
class NdbOut& operator<<(class NdbOut&, const NdbRecAttr &);

class NdbRecordPrintFormat
{
public:
  NdbRecordPrintFormat();
  virtual ~NdbRecordPrintFormat();
  const char *lines_terminated_by;
  const char *fields_terminated_by;
  const char *start_array_enclosure;
  const char *end_array_enclosure;
  const char *fields_enclosed_by;
  const char *fields_optionally_enclosed_by;
  const char *hex_prefix;
  const char *null_string;
  int hex_format;
};
NdbOut&
ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
                           const NdbRecordPrintFormat &f);

#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
#endif
...
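The new medium_value() accessor above sign-extends the 24-bit MEDIUMINT payload into a 32-bit integer. Below is a standalone sketch of that sign-extension step (a hypothetical helper, not part of the NDB sources), checked against the MEDIUMINT boundary values exercised by the new test:

#include <cstdint>
#include <cassert>

// Hypothetical illustration of the sign extension medium_value() performs.
int32_t sign_extend_24(uint32_t raw)
{
  if (raw & (1u << 23))   // sign bit of the 3-byte value is set
    raw |= 0xFFu << 24;   // propagate it into the top byte
  return (int32_t)raw;
}

int main()
{
  assert(sign_extend_24(0x7FFFFF) == 8388607);   // MEDIUMINT max
  assert(sign_extend_24(0x800000) == -8388608);  // MEDIUMINT min
  assert(sign_extend_24(0xFFFFFF) == -1);
  return 0;
}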
ndb/include/util/OutputStream.hpp
...
@@ -35,7 +35,8 @@ class FileOutputStream : public OutputStream {
  FILE * f;
public:
  FileOutputStream(FILE * file = stdout);

  FILE* getFile() { return f; }

  int print(const char * fmt, ...);
  int println(const char * fmt, ...);
  void flush() { fflush(f); }
...
ndb/src/kernel/blocks/dbtc/Dbtc.hpp
...
@@ -1838,9 +1838,14 @@ private:
                                Uint32 transid2);
  void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket);

  bool getAllowStartTransaction() const {
    if (getNodeState().getSingleUserMode())
      return true;
  bool getAllowStartTransaction(Uint32 nodeId) const {
    if (unlikely(getNodeState().getSingleUserMode()))
    {
      if (getNodeState().getSingleUserApi() == nodeId)
        return true;
      else
        return false;
    }
    return getNodeState().startLevel < NodeState::SL_STOPPING_2;
  }
...
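The admission rule that the new getAllowStartTransaction(nodeId) encodes can be summarized as: while the cluster is in single user mode only the designated API node may start transactions, otherwise the usual start-level check applies. A small illustrative model with plain types follows (hypothetical names, not NDB source):

#include <cstdint>

// Illustrative model of the single-user-mode admission check; sl_stopping_2
// stands in for NodeState::SL_STOPPING_2 (an assumption for this sketch).
bool allow_start_transaction(bool single_user_mode,
                             uint32_t single_user_api_node,
                             uint32_t requesting_node,
                             uint32_t start_level,
                             uint32_t sl_stopping_2)
{
  if (single_user_mode)
    return requesting_node == single_user_api_node;  // only the chosen API node
  return start_level < sl_stopping_2;                // normal admission rule
}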
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
...
@@ -1199,16 +1199,14 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
  const NodeId senderNodeId = refToNode(tapiBlockref);
  const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0;

  if (!(senderNodeId == getNodeState().getSingleUserApi()) &&
      !getNodeState().getSingleUserMode())
  {
  if (!(sl == NodeState::SL_SINGLEUSER &&
        senderNodeId == getNodeState().getSingleUserApi()))
  {
  {
    if (!(sl == NodeState::SL_STARTED ||
          (sl == NodeState::SL_STARTING && local == true))) {
      jam();

      Uint32 errCode;
      if (!(sl == NodeState::SL_SINGLEUSER && local))
      Uint32 errCode = 0;
      if (!local)
      {
        switch(sl){
        case NodeState::SL_STARTING:
...
@@ -1216,6 +1214,9 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
          break;
        case NodeState::SL_STOPPING_1:
        case NodeState::SL_STOPPING_2:
          if (getNodeState().getSingleUserMode() &&
              getNodeState().getSingleUserApi() == senderNodeId)
            break;
        case NodeState::SL_STOPPING_3:
        case NodeState::SL_STOPPING_4:
          if (getNodeState().stopping.systemShutdown)
...
@@ -1224,16 +1225,21 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
            errCode = ZNODE_SHUTDOWN_IN_PROGRESS;
          break;
        case NodeState::SL_SINGLEUSER:
          if (getNodeState().getSingleUserApi() == senderNodeId)
            break;
          errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
          break;
        default:
          errCode = ZWRONG_STATE;
          break;
        }
        signal->theData[0] = tapiPointer;
        signal->theData[1] = errCode;
        sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
        return;
        if (errCode)
        {
          signal->theData[0] = tapiPointer;
          signal->theData[1] = errCode;
          sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
          return;
        }
      }//if (!(sl == SL_SINGLEUSER))
    }//if
  }
...
@@ -1720,8 +1726,14 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
     * Initialize object before starting error handling
     */
    initApiConnectRec(signal, apiConnectptr.p, true);
start_failure:
    switch(getNodeState().startLevel){
    case NodeState::SL_STOPPING_2:
      if (getNodeState().getSingleUserMode())
      {
        terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
        break;
      }
    case NodeState::SL_STOPPING_3:
    case NodeState::SL_STOPPING_4:
      if (getNodeState().stopping.systemShutdown)
...
@@ -1732,6 +1744,12 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
    case NodeState::SL_SINGLEUSER:
      terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
      break;
    case NodeState::SL_STOPPING_1:
      if (getNodeState().getSingleUserMode())
      {
        terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
        break;
      }
    default:
      terrorCode = ZWRONG_STATE;
      break;
...
@@ -1753,6 +1771,13 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
    return;
  }
  case 60:
  {
    jam();
    initApiConnectRec(signal, apiConnectptr.p, true);
    apiConnectptr.p->m_exec_flag = 1;
    goto start_failure;
  }
  default:
    jam();
    systemErrorLab(signal, __LINE__);
...
@@ -2481,6 +2506,7 @@ Dbtc::seizeCacheRecord(Signal* signal)
/*****************************************************************************/
void Dbtc::execTCKEYREQ(Signal* signal)
{
  Uint32 sendersNodeId = refToNode(signal->getSendersBlockRef());
  UintR compare_transid1, compare_transid2;
  UintR titcLenAiInTckeyreq;
  UintR TkeyLength;
...
@@ -2526,7 +2552,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
  regApiPtr->m_exec_flag |= TexecFlag;
  switch (regApiPtr->apiConnectstate) {
  case CS_CONNECTED:{
    if (TstartFlag == 1 && getAllowStartTransaction() == true){
    if (TstartFlag == 1 && getAllowStartTransaction(sendersNodeId) == true){
      //---------------------------------------------------------------------
      // Initialise API connect record if transaction is started.
      //---------------------------------------------------------------------
...
@@ -2534,7 +2560,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
      initApiConnectRec(signal, regApiPtr);
      regApiPtr->m_exec_flag = TexecFlag;
    } else {
      if (getAllowStartTransaction() == true){
      if (getAllowStartTransaction(sendersNodeId) == true){
        /*------------------------------------------------------------------
         * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN
         * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO
...
@@ -2544,9 +2570,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
        return;
      } else {
        /**
         * getAllowStartTransaction() == false
         * getAllowStartTransaction(sendersNodeId) == false
         */
        TCKEY_abort(signal, 57);
        TCKEY_abort(signal, TexecFlag ? 60 : 57);
        return;
      }//if
    }
...
@@ -6161,9 +6187,11 @@ and otherwise we spread it out 310 ms.
void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
{
  Uint32 end_ptr, time_passed, time_out_value, mask_value;
  Uint32 old_mask_value = 0;
  const Uint32 api_con_sz = capiConnectFilesize;
  const Uint32 tc_timer = ctcTimer;
  const Uint32 time_out_param = ctimeOutValue;
  const Uint32 old_time_out_param = c_abortRec.oldTimeOutValue;
  ctimeOutCheckHeartbeat = tc_timer;
...
@@ -6184,11 +6212,39 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
    jam();
    mask_value = 31;
  }
  if (time_out_param != old_time_out_param &&
      getNodeState().getSingleUserMode())
  {
    // abort during single user mode, use old_mask_value as flag
    // and calculate value to be used for connections with allowed api
    if (old_time_out_param > 300)
    {
      jam();
      old_mask_value = 63;
    }
    else if (old_time_out_param < 30)
    {
      jam();
      old_mask_value = 7;
    }
    else
    {
      jam();
      old_mask_value = 31;
    }
  }
  for ( ; api_con_ptr < end_ptr; api_con_ptr++) {
    Uint32 api_timer = getApiConTimer(api_con_ptr);
    jam();
    if (api_timer != 0) {
      time_out_value = time_out_param + (api_con_ptr & mask_value);
      if (unlikely(old_mask_value)) // abort during single user mode
      {
        apiConnectptr.i = api_con_ptr;
        ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
        if (getNodeState().getSingleUserApi() ==
            refToNode(apiConnectptr.p->ndbapiBlockref))
        {
          // api allowed during single user, use original timeout
          time_out_value = old_time_out_param + (api_con_ptr & old_mask_value);
        }
      }
      time_passed = tc_timer - api_timer;
      if (time_passed > time_out_value)
      {
...
@@ -6805,6 +6861,33 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
  c_scan_frag_pool.getPtr(ptr, TscanConPtr);
  DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = " << ptr.p->scanFragState);
  const Uint32 time_out_param = ctimeOutValue;
  const Uint32 old_time_out_param = c_abortRec.oldTimeOutValue;
  if (unlikely(time_out_param != old_time_out_param &&
               getNodeState().getSingleUserMode()))
  {
    jam();
    ScanRecordPtr scanptr;
    scanptr.i = ptr.p->scanRec;
    ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
    ApiConnectRecordPtr TlocalApiConnectptr;
    TlocalApiConnectptr.i = scanptr.p->scanApiRec;
    ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
    if (refToNode(TlocalApiConnectptr.p->ndbapiBlockref) ==
        getNodeState().getSingleUserApi())
    {
      jam();
      Uint32 val = ctcTimer - ptr.p->scanFragTimer;
      if (val <= old_time_out_param)
      {
        jam();
        goto next;
      }
    }
  }
  /*-------------------------------------------------------------------------*/
  // The scan fragment has expired its timeout. Check its state to decide
  // what to do.
...
@@ -6866,6 +6949,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
    break;
  }//switch
next:
  signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
  signal->theData[1] = TscanConPtr + 1;
  sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
...
@@ -8696,6 +8780,14 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
    }
  }
  if (getNodeState().startLevel == NodeState::SL_SINGLEUSER &&
      getNodeState().getSingleUserApi() !=
      refToNode(apiConnectptr.p->ndbapiBlockref))
  {
    errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
    goto SCAN_TAB_error;
  }
  seizeTcConnect(signal);
  tcConnectptr.p->apiConnect = apiConnectptr.i;
  tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
...
@@ -11009,7 +11101,7 @@ void Dbtc::execABORT_ALL_REQ(Signal* signal)
  const Uint32 senderData = req->senderData;
  const BlockReference senderRef = req->senderRef;

  if (getAllowStartTransaction() == true &&
      !getNodeState().getSingleUserMode()){
  if (getAllowStartTransaction(refToNode(senderRef)) == true &&
      !getNodeState().getSingleUserMode()){
    jam();
    ref->senderData = senderData;
...
@@ -11437,6 +11529,17 @@ void Dbtc::execTCINDXREQ(Signal* signal)
    regApiPtr->transid[1] = tcIndxReq->transId2;
  }//if
  if (getNodeState().startLevel == NodeState::SL_SINGLEUSER &&
      getNodeState().getSingleUserApi() !=
      refToNode(regApiPtr->ndbapiBlockref))
  {
    terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
    regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
    apiConnectptr = transPtr;
    abortErrorLab(signal);
    return;
  }
  if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
    jam();
    // Failed to allocate index operation
...
ndb/src/ndbapi/ClusterMgr.cpp
...
@@ -405,7 +405,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
  node.m_state = apiRegConf->nodeState;

  if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
                          node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
                          node.m_state.getSingleUserMode())){
    set_node_alive(node, true);
  } else {
    set_node_alive(node, false);
...
ndb/src/ndbapi/Ndb.cpp
...
@@ -56,6 +56,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
      // We have connections now to the desired node. Return
      //****************************************************************************
      DBUG_RETURN(getConnectedNdbTransaction(tConNode));
    } else if (TretCode < 0) {
      DBUG_RETURN(NULL);
    } else if (TretCode != 0) {
      tAnyAlive = 1;
    }//if
...
@@ -79,6 +81,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
      // We have connections now to the desired node. Return
      //****************************************************************************
      DBUG_RETURN(getConnectedNdbTransaction(tNode));
    } else if (TretCode < 0) {
      DBUG_RETURN(NULL);
    } else if (TretCode != 0) {
      tAnyAlive = 1;
    }//if
...
@@ -107,6 +111,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
      // We have connections now to the desired node. Return
      //****************************************************************************
      DBUG_RETURN(getConnectedNdbTransaction(tNode));
    } else if (TretCode < 0) {
      DBUG_RETURN(NULL);
    } else if (TretCode != 0) {
      tAnyAlive = 1;
    }//if
...
@@ -207,6 +213,11 @@ Ndb::NDB_connect(Uint32 tNode)
    DBUG_PRINT("info",
               ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d",
                tReturnCode, tNdbCon->Status()));
    if (theError.code == 299)
    {
      // single user mode so no need to retry with other node
      DBUG_RETURN(-1);
    }
    DBUG_RETURN(3);
  }//if
}//Ndb::NDB_connect()
...
ndb/src/ndbapi/NdbRecAttr.cpp
...
@@ -140,8 +140,24 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){
  return false;
}

NdbRecordPrintFormat::NdbRecordPrintFormat()
{
  fields_terminated_by = ";";
  start_array_enclosure = "[";
  end_array_enclosure = "]";
  fields_enclosed_by = "";
  fields_optionally_enclosed_by = "\"";
  lines_terminated_by = "\n";
  hex_prefix = "H'";
  null_string = "[NULL]";
  hex_format = 0;
}
NdbRecordPrintFormat::~NdbRecordPrintFormat() {}
static const NdbRecordPrintFormat default_print_format;

static void
ndbrecattr_print_string(NdbOut& out, const char *type,
ndbrecattr_print_string(NdbOut& out, const NdbRecordPrintFormat &f,
                        const char *type, bool is_binary,
                        const char *aref, unsigned sz)
{
  const unsigned char* ref = (const unsigned char*)aref;
...
@@ -150,6 +166,25 @@ ndbrecattr_print_string(NdbOut& out, const char *type,
  for (i = sz - 1; i >= 0; i--)
    if (ref[i] == 0) sz--;
    else break;
  if (!is_binary)
  {
    // trailing spaces are not printed
    for (i = sz - 1; i >= 0; i--)
      if (ref[i] == 32) sz--;
      else break;
  }
  if (is_binary && f.hex_format)
  {
    if (sz == 0)
    {
      out.print("0x0");
      return;
    }
    out.print("0x");
    for (len = 0; len < (int)sz; len++)
      out.print("%02X", (int)ref[len]);
    return;
  }
  if (sz == 0) return; // empty

  for (len = 0; len < (int)sz && ref[i] != 0; len++)
...
@@ -170,37 +205,68 @@ ndbrecattr_print_string(NdbOut& out, const char *type,
    for (i = len + 1; ref[i] != 0; i++)
    out.print("%u]", len - i);
    assert((int)sz > i);
    ndbrecattr_print_string(out, type, aref + i, sz - i);
    ndbrecattr_print_string(out, f, type, is_binary, aref + i, sz - i);
  }
}

NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
NdbOut&
ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
                           const NdbRecordPrintFormat &f)
{
  if (r.isNULL())
  {
    out << "[NULL]";
    out << f.null_string;
    return out;
  }

  const NdbDictionary::Column* c = r.getColumn();
  uint length = c->getLength();
  if (length > 1)
    out << "[";
  for (Uint32 j = 0; j < length; j++)
  {
    if (j > 0)
      out << " ";
  const char *fields_optionally_enclosed_by;
  if (f.fields_enclosed_by[0] == '\0')
    fields_optionally_enclosed_by = f.fields_optionally_enclosed_by;
  else
    fields_optionally_enclosed_by = "";
  out << f.fields_enclosed_by;
  Uint32 j;
  switch(r.getType()){
  case NdbDictionary::Column::Bigunsigned:
    out << r.u_64_value();
    break;
  case NdbDictionary::Column::Bit:
    out << hex << "H'" << r.u_32_value() << dec;
    out << f.hex_prefix << "0x";
    if (length < 33)
    {
      out.print("%X", r.u_32_value());
    }
    else if (length < 65)
    {
      out.print("%llX", r.u_64_value());
    }
    else
    {
      const unsigned char *buf = (unsigned char*)r.aRef();
      int k = 4 * ((length + 31) / 32);
      while (k > 0 && (*(buf + --k) == 0));
      do
      {
        out.print("%X", (Uint32)*(buf + k--));
      } while (k >= 0);
    }
    break;
  case NdbDictionary::Column::Unsigned:
    out << r.u_32_value();
    if (length > 1)
      out << f.start_array_enclosure;
    out << *(Uint32*)r.aRef();
    for (j = 1; j < length; j++)
      out << " " << *((Uint32*)r.aRef() + j);
    if (length > 1)
      out << f.end_array_enclosure;
    break;
  case NdbDictionary::Column::Mediumunsigned:
    out << r.u_medium_value();
    break;
  case NdbDictionary::Column::Smallunsigned:
    out << r.u_short_value();
...
@@ -214,6 +280,9 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
  case NdbDictionary::Column::Int:
    out << r.int32_value();
    break;
  case NdbDictionary::Column::Mediumint:
    out << r.medium_value();
    break;
  case NdbDictionary::Column::Smallint:
    out << r.short_value();
    break;
...
@@ -221,25 +290,37 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
    out << (int)r.char_value();
    break;
  case NdbDictionary::Column::Binary:
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
    j = r.arraySize();
    ndbrecattr_print_string(out, "Binary", r.aRef(), j);
    ndbrecattr_print_string(out, f, "Binary", true, r.aRef(), j);
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
    break;
  case NdbDictionary::Column::Char:
    out << fields_optionally_enclosed_by;
    j = length;
    ndbrecattr_print_string(out, "Char", r.aRef(), r.arraySize());
    ndbrecattr_print_string(out, f, "Char", false, r.aRef(), r.arraySize());
    out << fields_optionally_enclosed_by;
    break;
  case NdbDictionary::Column::Varchar:
  {
    out << fields_optionally_enclosed_by;
    unsigned len = *(const unsigned char*)r.aRef();
    ndbrecattr_print_string(out, "Varchar", r.aRef() + 1, len);
    ndbrecattr_print_string(out, f, "Varchar", false, r.aRef() + 1, len);
    j = length;
    out << fields_optionally_enclosed_by;
  }
  break;
  case NdbDictionary::Column::Varbinary:
  {
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
    unsigned len = *(const unsigned char*)r.aRef();
    ndbrecattr_print_string(out, "Varbinary", r.aRef() + 1, len);
    ndbrecattr_print_string(out, f, "Varbinary", true, r.aRef() + 1, len);
    j = length;
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
  }
  break;
  case NdbDictionary::Column::Float:
...
@@ -368,16 +449,26 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
    break;
  case NdbDictionary::Column::Longvarchar:
  {
    out << fields_optionally_enclosed_by;
    unsigned len = uint2korr(r.aRef());
    ndbrecattr_print_string(out, f, "Longvarchar", false, r.aRef() + 2, len);
    j = length;
    out << fields_optionally_enclosed_by;
  }
  break;
  case NdbDictionary::Column::Longvarbinary:
  {
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
    unsigned len = uint2korr(r.aRef());
    ndbrecattr_print_string(out, "Longvarchar", r.aRef() + 2, len);
    ndbrecattr_print_string(out, f, "Longvarbinary", true, r.aRef() + 2, len);
    j = length;
    if (!f.hex_format)
      out << fields_optionally_enclosed_by;
  }
  break;
  case NdbDictionary::Column::Undefined:
  case NdbDictionary::Column::Mediumint:
  case NdbDictionary::Column::Mediumunsigned:
  case NdbDictionary::Column::Longvarbinary:
  unknown:
  //default: /* no print functions for the rest, just print type */
    out << (int)r.getType();
...
@@ -386,16 +477,17 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
    out << " " << j << " times";
    break;
  }
  }
  if (length > 1)
  {
    out << "]";
    out << f.fields_enclosed_by;
  }
  return out;
}

NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
{
  return ndbrecattr_print_formatted(out, r, default_print_format);
}

Int64
NdbRecAttr::int64_value() const
{
...
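A minimal sketch of how the formatting API implemented above might be called from application code, assuming the NDB API headers; the function name print_attr_csv is illustrative, and rec would come from a real NdbOperation::getValue() call:

#include <NdbApi.hpp>
#include <NdbOut.hpp>

// Hypothetical helper: print one attribute with mysqldump-like separators.
void print_attr_csv(const NdbRecAttr &rec)
{
  NdbRecordPrintFormat fmt;             // defaults: ';' separator, "[NULL]"
  fmt.fields_terminated_by = ",";       // CSV-style field separator
  fmt.fields_optionally_enclosed_by = "'";
  fmt.hex_format = 1;                   // print binary columns as 0x...
  ndbrecattr_print_formatted(ndbout, rec, fmt);
  ndbout << fmt.lines_terminated_by;
}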
ndb/src/ndbapi/TransporterFacade.hpp
...
@@ -315,7 +315,8 @@ inline
bool
TransporterFacade::get_node_stopping(NodeId n) const {
  const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
  return ((node.m_state.startLevel == NodeState::SL_STOPPING_1) ||
  return (!node.m_state.getSingleUserMode() &&
          ((node.m_state.startLevel == NodeState::SL_STOPPING_1) ||
           (node.m_state.startLevel == NodeState::SL_STOPPING_2)));
}
...
@@ -326,16 +327,9 @@ TransporterFacade::getIsNodeSendable(NodeId n) const {
  const Uint32 startLevel = node.m_state.startLevel;
  if (node.m_info.m_type == NodeInfo::DB)
  {
    if (node.m_state.singleUserMode &&
        ownId() == node.m_state.singleUserApi)
    {
      return (node.compatible &&
              (node.m_state.startLevel == NodeState::SL_STOPPING_1 ||
               node.m_state.startLevel == NodeState::SL_STARTED ||
               node.m_state.startLevel == NodeState::SL_SINGLEUSER));
    }
    else
      return node.compatible &&
             (startLevel == NodeState::SL_STARTED ||
              startLevel == NodeState::SL_STOPPING_1);
    return node.compatible &&
           (startLevel == NodeState::SL_STARTED ||
            startLevel == NodeState::SL_STOPPING_1 ||
            node.m_state.getSingleUserMode());
  } else if (node.m_info.m_type == NodeInfo::REP) {
    /**
     * @todo Check that REP node actually has received API_REG_REQ
...
ndb/tools/restore/Restore.cpp
...
@@ -23,6 +23,8 @@
#include <SimpleProperties.hpp>
#include <signaldata/DictTabInfo.hpp>

extern NdbRecordPrintFormat g_ndbrecord_print_format;

Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data
Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data
Uint64 Twiddle64(Uint64 in); // Byte shift 64-bit data
...
@@ -118,6 +120,8 @@ RestoreMetaData::loadContent()
      return 0;
    }
  }
  if (!markSysTables())
    return 0;
  if (!readGCPEntry())
    return 0;
...
@@ -175,6 +179,49 @@ RestoreMetaData::readMetaTableDesc() {
  return parseTableDescriptor((Uint32*)ptr, len);
}

bool
RestoreMetaData::markSysTables()
{
  Uint32 i;
  for (i = 0; i < getNoOfTables(); i++) {
    TableS* table = allTables[i];
    table->m_local_id = i;
    const char* tableName = table->getTableName();
    if ( // XXX should use type
        strcmp(tableName, "SYSTAB_0") == 0 ||
        strcmp(tableName, "NDB$EVENTS_0") == 0 ||
        strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
        strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0)
      table->isSysTable = true;
  }
  for (i = 0; i < getNoOfTables(); i++) {
    TableS* blobTable = allTables[i];
    const char* blobTableName = blobTable->getTableName();
    // yet another match blob
    int cnt, id1, id2;
    char buf[256];
    cnt = sscanf(blobTableName, "%[^/]/%[^/]/NDB$BLOB_%d_%d",
                 buf, buf, &id1, &id2);
    if (cnt == 4) {
      Uint32 j;
      for (j = 0; j < getNoOfTables(); j++) {
        TableS* table = allTables[j];
        if (table->getTableId() == (Uint32)id1) {
          if (table->isSysTable)
            blobTable->isSysTable = true;
          blobTable->m_main_table = table;
          break;
        }
      }
      if (j == getNoOfTables()) {
        err << "Restore: Bad primary table id in " << blobTableName << endl;
        return false;
      }
    }
  }
  return true;
}

bool
RestoreMetaData::readGCPEntry() {
...
@@ -259,6 +306,8 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
  m_max_auto_val = 0;
  m_noOfRecords = 0;
  backupVersion = version;
  isSysTable = false;
  m_main_table = NULL;

  for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
    createAttr(tableImpl->getColumn(i));
...
@@ -704,6 +753,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret)
    return false;
  }

  info.setLevel(254);
  info << "_____________________________________________________" << endl
       << "Processing data in table: " << m_currentTable->getTableName()
       << "(" << Header.TableId << ") fragment "
...
@@ -924,13 +974,13 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){
  if (data.null)
  {
    ndbout << "<NULL>";
    ndbout << g_ndbrecord_print_format.null_string;
    return ndbout;
  }

  NdbRecAttr tmprec(0);
  tmprec.setup(desc.m_column, (char *)data.void_value);
  ndbout << tmprec;
  ndbrecattr_print_formatted(ndbout, tmprec, g_ndbrecord_print_format);
  return ndbout;
}
...
@@ -939,17 +989,15 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){
NdbOut&
operator<<(NdbOut& ndbout, const TupleS& tuple)
{
  ndbout << tuple.getTable()->getTableName() << "; ";
  for (int i = 0; i < tuple.getNoOfAttributes(); i++)
  {
    if (i > 0)
      ndbout << g_ndbrecord_print_format.fields_terminated_by;
    AttributeData * attr_data = tuple.getData(i);
    const AttributeDesc * attr_desc = tuple.getDesc(i);
    const AttributeS attr = {attr_desc, *attr_data};
    debug << i << " " << attr_desc->m_column->getName();
    ndbout << attr;
    if (i != (tuple.getNoOfAttributes() - 1))
      ndbout << delimiter << " ";
  } // for
  return ndbout;
}
...
ndb/tools/restore/Restore.hpp
...
@@ -25,8 +25,6 @@
#include <ndb_version.h>
#include <version.h>

#define delimiter ";"

const int FileNameLenC = 256;
const int TableNameLenC = 256;
const int AttrNameLenC = 256;
...
@@ -143,6 +141,10 @@ class TableS {
  int pos;

  bool isSysTable;
  TableS *m_main_table;

  Uint32 m_local_id;

  Uint64 m_noOfRecords;
  Vector<FragmentInfo *> m_fragmentInfo;
...
@@ -156,6 +158,9 @@ public:
  Uint32 getTableId() const {
    return m_dictTable->getTableId();
  }
  Uint32 getLocalId() const {
    return m_local_id;
  }
  Uint32 getNoOfRecords() const {
    return m_noOfRecords;
  }
...
@@ -235,6 +240,14 @@ public:
    return allAttributesDesc[attributeId];
  }

  bool getSysTable() const {
    return isSysTable;
  }

  const TableS *getMainTable() const {
    return m_main_table;
  }

  TableS& operator=(TableS& org) ;
}; // TableS;
...
@@ -285,6 +298,7 @@ class RestoreMetaData : public BackupFile {
  Vector<TableS *> allTables;
  bool readMetaFileHeader();
  bool readMetaTableDesc();
  bool markSysTables();
  bool readGCPEntry();
  bool readFragmentInfo();
...
ndb/tools/restore/consumer_printer.cpp
...
@@ -14,6 +14,9 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#include "consumer_printer.hpp"
extern FilteredNdbOut info;
extern NdbRecordPrintFormat g_ndbrecord_print_format;
extern const char *tab_path;

bool
BackupPrinter::table(const TableS & tab)
...
@@ -21,7 +24,8 @@ BackupPrinter::table(const TableS & tab)
  if (m_print || m_print_meta)
  {
    m_ndbout << tab;
    ndbout_c("Successfully printed table: %s", tab.m_dictTable->getName());
    info.setLevel(254);
    info << "Successfully printed table: ", tab.m_dictTable->getName();
  }
  return true;
}
...
@@ -31,7 +35,14 @@ BackupPrinter::tuple(const TupleS & tup)
{
  m_dataCount++;
  if (m_print || m_print_data)
    m_ndbout << tup << endl;
  {
    if (m_ndbout.m_out == info.m_out)
    {
      info.setLevel(254);
      info << tup.getTable()->getTableName() << "; ";
    }
    m_ndbout << tup << g_ndbrecord_print_format.lines_terminated_by;
  }
}

void
...
@@ -47,8 +58,9 @@ BackupPrinter::endOfLogEntrys()
{
  if (m_print || m_print_log)
  {
    ndbout << "Printed " << m_dataCount << " tuples and "
           << m_logCount << " log entries"
           << " to stdout." << endl;
    info.setLevel(254);
    info << "Printed " << m_dataCount << " tuples and "
         << m_logCount << " log entries"
         << " to stdout." << endl;
  }
}
ndb/tools/restore/restore_main.cpp
...
...
@@ -18,7 +18,9 @@
#include <Vector.hpp>
#include <ndb_limits.h>
#include <NdbTCP.h>
#include <NdbMem.h>
#include <NdbOut.hpp>
#include <OutputStream.hpp>
#include <NDBT_ReturnCodes.h>
#include "consumer_restore.hpp"
...
...
@@ -33,8 +35,18 @@ static int ga_nParallelism = 128;
static
int
ga_backupId
=
0
;
static
bool
ga_dont_ignore_systab_0
=
false
;
static
Vector
<
class
BackupConsumer
*>
g_consumers
;
static
BackupPrinter
*
g_printer
=
NULL
;
static
const
char
*
ga_backupPath
=
"."
DIR_SEPARATOR
;
static
const
char
*
default_backupPath
=
"."
DIR_SEPARATOR
;
static
const
char
*
ga_backupPath
=
default_backupPath
;
const
char
*
opt_ndb_database
=
NULL
;
const
char
*
opt_ndb_table
=
NULL
;
unsigned
int
opt_verbose
;
unsigned
int
opt_hex_format
;
Vector
<
BaseString
>
g_databases
;
Vector
<
BaseString
>
g_tables
;
NdbRecordPrintFormat
g_ndbrecord_print_format
;
NDB_STD_OPTS_VARS
;
...
...
@@ -53,6 +65,28 @@ BaseString g_options("ndb_restore");
const
char
*
load_default_groups
[]
=
{
"mysql_cluster"
,
"ndb_restore"
,
0
};
enum
ndb_restore_options
{
OPT_PRINT
=
NDB_STD_OPTIONS_LAST
,
OPT_PRINT_DATA
,
OPT_PRINT_LOG
,
OPT_PRINT_META
,
OPT_BACKUP_PATH
,
OPT_HEX_FORMAT
,
OPT_FIELDS_ENCLOSED_BY
,
OPT_FIELDS_TERMINATED_BY
,
OPT_FIELDS_OPTIONALLY_ENCLOSED_BY
,
OPT_LINES_TERMINATED_BY
,
OPT_APPEND
,
OPT_VERBOSE
};
static
const
char
*
opt_fields_enclosed_by
=
NULL
;
static
const
char
*
opt_fields_terminated_by
=
NULL
;
static
const
char
*
opt_fields_optionally_enclosed_by
=
NULL
;
static
const
char
*
opt_lines_terminated_by
=
NULL
;
static
const
char
*
tab_path
=
NULL
;
static
int
opt_append
;
static
struct
my_option
my_long_options
[]
=
{
NDB_STD_OPTS
(
"ndb_restore"
),
...
...
@@ -78,22 +112,56 @@ static struct my_option my_long_options[] =
"(parallelism can be 1 to 1024)"
,
(
gptr
*
)
&
ga_nParallelism
,
(
gptr
*
)
&
ga_nParallelism
,
0
,
GET_INT
,
REQUIRED_ARG
,
128
,
1
,
1024
,
0
,
1
,
0
},
{
"print"
,
256
,
"Print data and log to stdout"
,
{
"print"
,
OPT_PRINT
,
"Print data and log to stdout"
,
(
gptr
*
)
&
_print
,
(
gptr
*
)
&
_print
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"print_data"
,
257
,
"Print data to stdout"
,
{
"print_data"
,
OPT_PRINT_DATA
,
"Print data to stdout"
,
(
gptr
*
)
&
_print_data
,
(
gptr
*
)
&
_print_data
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"print_meta"
,
258
,
"Print meta data to stdout"
,
{
"print_meta"
,
OPT_PRINT_META
,
"Print meta data to stdout"
,
(
gptr
*
)
&
_print_meta
,
(
gptr
*
)
&
_print_meta
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"print_log"
,
259
,
"Print log to stdout"
,
{
"print_log"
,
OPT_PRINT_LOG
,
"Print log to stdout"
,
(
gptr
*
)
&
_print_log
,
(
gptr
*
)
&
_print_log
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"backup_path"
,
OPT_BACKUP_PATH
,
"Path to backup files"
,
(
gptr
*
)
&
ga_backupPath
,
(
gptr
*
)
&
ga_backupPath
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"dont_ignore_systab_0"
,
'f'
,
"Experimental. Do not ignore system table during restore."
,
(
gptr
*
)
&
ga_dont_ignore_systab_0
,
(
gptr
*
)
&
ga_dont_ignore_systab_0
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"fields-enclosed-by"
,
OPT_FIELDS_ENCLOSED_BY
,
"Fields are enclosed by ..."
,
(
gptr
*
)
&
opt_fields_enclosed_by
,
(
gptr
*
)
&
opt_fields_enclosed_by
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"fields-terminated-by"
,
OPT_FIELDS_TERMINATED_BY
,
"Fields are terminated by ..."
,
(
gptr
*
)
&
opt_fields_terminated_by
,
(
gptr
*
)
&
opt_fields_terminated_by
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"fields-optionally-enclosed-by"
,
OPT_FIELDS_OPTIONALLY_ENCLOSED_BY
,
"Fields are optionally enclosed by ..."
,
(
gptr
*
)
&
opt_fields_optionally_enclosed_by
,
(
gptr
*
)
&
opt_fields_optionally_enclosed_by
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"hex"
,
OPT_HEX_FORMAT
,
"print binary types in hex format"
,
(
gptr
*
)
&
opt_hex_format
,
(
gptr
*
)
&
opt_hex_format
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"tab"
,
'T'
,
"Creates tab separated textfile for each table to "
"given path. (creates .txt files)"
,
(
gptr
*
)
&
tab_path
,
(
gptr
*
)
&
tab_path
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"append"
,
OPT_APPEND
,
"for --tab append data to file"
,
(
gptr
*
)
&
opt_append
,
(
gptr
*
)
&
opt_append
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"lines-terminated-by"
,
OPT_LINES_TERMINATED_BY
,
""
,
(
gptr
*
)
&
opt_lines_terminated_by
,
(
gptr
*
)
&
opt_lines_terminated_by
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"verbose"
,
OPT_VERBOSE
,
"verbosity"
,
(
gptr
*
)
&
opt_verbose
,
(
gptr
*
)
&
opt_verbose
,
0
,
GET_INT
,
REQUIRED_ARG
,
1
,
0
,
255
,
0
,
0
,
0
},
{
0
,
0
,
0
,
0
,
0
,
0
,
GET_NO_ARG
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
}
};
...
...
@@ -119,19 +187,26 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#endif
ndb_std_get_one_option
(
optid
,
opt
,
argument
);
switch
(
optid
)
{
case
OPT_VERBOSE
:
info
.
setThreshold
(
255
-
opt_verbose
);
break
;
case
'n'
:
if
(
ga_nodeId
==
0
)
{
printf
(
"Error in --nodeid,-n setting, see --help
\n
"
)
;
err
<<
"Error in --nodeid,-n setting, see --help"
;
exit
(
NDBT_ProgramExit
(
NDBT_WRONGARGS
));
}
info
.
setLevel
(
254
);
info
<<
"Nodeid = "
<<
ga_nodeId
<<
endl
;
break
;
case
'b'
:
if
(
ga_backupId
==
0
)
{
printf
(
"Error in --backupid,-b setting, see --help
\n
"
)
;
err
<<
"Error in --backupid,-b setting, see --help"
;
exit
(
NDBT_ProgramExit
(
NDBT_WRONGARGS
));
}
info
.
setLevel
(
254
);
info
<<
"Backup Id = "
<<
ga_backupId
<<
endl
;
break
;
}
return
0
;
...
...
@@ -139,20 +214,26 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
bool
readArguments
(
int
*
pargc
,
char
***
pargv
)
{
Uint32
i
;
debug
<<
"Load defaults"
<<
endl
;
const
char
*
load_default_groups
[]
=
{
"mysql_cluster"
,
"ndb_restore"
,
0
};
load_defaults
(
"my"
,
load_default_groups
,
pargc
,
pargv
);
debug
<<
"handle_options"
<<
endl
;
if
(
handle_options
(
pargc
,
pargv
,
my_long_options
,
get_one_option
))
{
exit
(
NDBT_ProgramExit
(
NDBT_WRONGARGS
));
}
BackupPrinter
*
printer
=
new
BackupPrinter
();
if
(
printer
==
NULL
)
g_
printer
=
new
BackupPrinter
();
if
(
g_
printer
==
NULL
)
return
false
;
BackupRestore
*
restore
=
new
BackupRestore
(
ga_nParallelism
);
if
(
restore
==
NULL
)
{
delete
printer
;
delete
g_printer
;
g_printer
=
NULL
;
return
false
;
}
...
...
@@ -160,22 +241,22 @@ readArguments(int *pargc, char*** pargv)
{
ga_print
=
true
;
ga_restore
=
true
;
printer
->
m_print
=
true
;
g_
printer
->
m_print
=
true
;
}
if
(
_print_meta
)
{
ga_print
=
true
;
printer
->
m_print_meta
=
true
;
g_
printer
->
m_print_meta
=
true
;
}
if
(
_print_data
)
{
ga_print
=
true
;
printer
->
m_print_data
=
true
;
g_
printer
->
m_print_data
=
true
;
}
if
(
_print_log
)
{
ga_print
=
true
;
printer
->
m_print_log
=
true
;
g_
printer
->
m_print_log
=
true
;
}
if
(
_restore_data
)
...
...
@@ -191,19 +272,64 @@ readArguments(int *pargc, char*** pargv)
   }
 
   {
-    BackupConsumer * c = printer;
+    BackupConsumer * c = g_printer;
     g_consumers.push_back(c);
   }
   {
     BackupConsumer * c = restore;
     g_consumers.push_back(c);
   }
-  // Set backup file path
-  if (*pargv[0] != NULL)
+  for (;;)
   {
-    ga_backupPath = *pargv[0];
+    int i= 0;
+    if (ga_backupPath == default_backupPath)
+    {
+      // Set backup file path
+      if ((*pargv)[i] == NULL)
+        break;
+      ga_backupPath = (*pargv)[i++];
+    }
+    if ((*pargv)[i] == NULL)
+      break;
+    g_databases.push_back((*pargv)[i++]);
+    while ((*pargv)[i] != NULL)
+    {
+      g_tables.push_back((*pargv)[i++]);
+    }
+    break;
   }
+  info.setLevel(254);
   info << "backup path = " << ga_backupPath << endl;
+  if (g_databases.size() > 0)
+  {
+    info << "Restoring only from database " << g_databases[0].c_str() << endl;
+    if (g_tables.size() > 0)
+      info << "Restoring only tables:";
+    for (unsigned i= 0; i < g_tables.size(); i++)
+    {
+      info << " " << g_tables[i].c_str();
+    }
+    if (g_tables.size() > 0)
+      info << endl;
+  }
+  /*
+    the below formatting follows the formatting from mysqldump
+    do not change unless to adopt to changes in mysqldump
+  */
+  g_ndbrecord_print_format.fields_enclosed_by=
+    opt_fields_enclosed_by ? opt_fields_enclosed_by : "";
+  g_ndbrecord_print_format.fields_terminated_by=
+    opt_fields_terminated_by ? opt_fields_terminated_by : "\t";
+  g_ndbrecord_print_format.fields_optionally_enclosed_by=
+    opt_fields_optionally_enclosed_by ? opt_fields_optionally_enclosed_by : "";
+  g_ndbrecord_print_format.lines_terminated_by=
+    opt_lines_terminated_by ? opt_lines_terminated_by : "\n";
+  if (g_ndbrecord_print_format.fields_optionally_enclosed_by[0] == '\0')
+    g_ndbrecord_print_format.null_string= "\\N";
+  else
+    g_ndbrecord_print_format.null_string= "";
+  g_ndbrecord_print_format.hex_prefix= "";
+  g_ndbrecord_print_format.hex_format= opt_hex_format;
   return true;
 }
...
...
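Aside: the g_ndbrecord_print_format defaults set in the hunk above follow mysqldump conventions, that is tab-separated fields, newline-terminated records, and \N for NULL when fields are not enclosed. The snippet below is a hypothetical, self-contained illustration of how such settings combine when a single row is emitted; PrintFormat and print_row are illustrative names, not part of ndb_restore.

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical helper mirroring the defaults set above.
struct PrintFormat {
  std::string fields_terminated_by = "\t";
  std::string lines_terminated_by  = "\n";
  std::string fields_enclosed_by   = "";
  std::string null_string          = "\\N";  // used when fields are not enclosed
};

static void print_row(const PrintFormat& f,
                      const std::vector<const char*>& fields) {
  for (size_t i = 0; i < fields.size(); i++) {
    if (i) std::fputs(f.fields_terminated_by.c_str(), stdout);
    std::fputs(f.fields_enclosed_by.c_str(), stdout);
    std::fputs(fields[i] ? fields[i] : f.null_string.c_str(), stdout);
    std::fputs(f.fields_enclosed_by.c_str(), stdout);
  }
  std::fputs(f.lines_terminated_by.c_str(), stdout);
}

int main() {
  PrintFormat fmt;
  // A row with a NULL in the middle prints as: 1<TAB>\N<TAB>abc<NL>
  print_row(fmt, {"1", nullptr, "abc"});
  return 0;
}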
@@ -215,14 +341,81 @@ clearConsumers()
   g_consumers.clear();
 }
 
-static bool
-checkSysTable(const char *tableName)
+static inline bool
+checkSysTable(const TableS* table)
+{
+  return ga_dont_ignore_systab_0 || ! table->getSysTable();
+}
+
+static inline bool
+checkSysTable(const RestoreMetaData& metaData, uint i)
 {
-  return ga_dont_ignore_systab_0 ||
-    (strcmp(tableName, "SYSTAB_0") != 0 &&
-     strcmp(tableName, "NDB$EVENTS_0") != 0 &&
-     strcmp(tableName, "sys/def/SYSTAB_0") != 0 &&
-     strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0);
+  assert(i < metaData.getNoOfTables());
+  return checkSysTable(metaData[i]);
+}
+
+static inline bool
+isBlobTable(const TableS* table)
+{
+  return table->getMainTable() != NULL;
+}
+
+static inline bool
+isIndex(const TableS* table)
+{
+  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table->m_dictTable);
+  return (int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined;
+}
+
+static inline bool
+checkDbAndTableName(const TableS* table)
+{
+  if (g_tables.size() == 0 && g_databases.size() == 0)
+    return true;
+  if (g_databases.size() == 0)
+    g_databases.push_back("TEST_DB");
+
+  // Filter on the main table name for indexes and blobs
+  const char *table_name;
+  if (isBlobTable(table))
+    table_name= table->getMainTable()->getTableName();
+  else if (isIndex(table))
+    table_name=
+      NdbTableImpl::getImpl(*table->m_dictTable).m_primaryTable.c_str();
+  else
+    table_name= table->getTableName();
+
+  unsigned i;
+  for (i= 0; i < g_databases.size(); i++)
+  {
+    if (strncmp(table_name, g_databases[i].c_str(),
+                g_databases[i].length()) == 0 &&
+        table_name[g_databases[i].length()] == '/')
+    {
+      // we have a match
+      if (g_databases.size() > 1 || g_tables.size() == 0)
+        return true;
+      break;
+    }
+  }
+  if (i == g_databases.size())
+    return false; // no match found
+
+  while (*table_name != '/') table_name++;
+  table_name++;
+  while (*table_name != '/') table_name++;
+  table_name++;
+
+  for (i= 0; i < g_tables.size(); i++)
+  {
+    if (strcmp(table_name, g_tables[i].c_str()) == 0)
+    {
+      // we have a match
+      return true;
+    }
+  }
+  return false;
 }
 
 static void
...
...
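Aside: checkDbAndTableName() in the hunk above relies on NDB's internal naming scheme, where a table is identified as "<database>/<schema>/<table>"; matching the database is a prefix compare up to the first '/', and skipping two '/'-delimited components leaves the table part. The standalone snippet below illustrates that same logic; the sample name mydb/def/t1 is made up for the example and is not taken from the commit.

#include <cstring>
#include <cstdio>

int main() {
  const char* internal_name = "mydb/def/t1";   // hypothetical internal name
  const char* db = "mydb";

  // Database match: prefix compare followed by a '/' at the boundary.
  bool db_match = strncmp(internal_name, db, strlen(db)) == 0 &&
                  internal_name[strlen(db)] == '/';

  // Table part: skip two '/' delimiters, as the restore code does.
  const char* table_name = internal_name;
  while (*table_name != '/') table_name++;
  table_name++;
  while (*table_name != '/') table_name++;
  table_name++;

  printf("db_match=%d table=%s\n", db_match, table_name);  // db_match=1 table=t1
  return 0;
}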
@@ -247,6 +440,7 @@ main(int argc, char** argv)
 {
   NDB_INIT(argv[0]);
 
+  debug << "Start readArguments" << endl;
   if (!readArguments(&argc, &argv))
   {
     exitHandler(NDBT_FAILED);
...
...
@@ -265,10 +459,11 @@ main(int argc, char** argv)
   /**
    * we must always load meta data, even if we will only print it to stdout
    */
+  debug << "Start restoring meta data" << endl;
   RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
   if (!metaData.readHeader())
   {
-    ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
+    err << "Failed to read " << metaData.getFilename() << endl << endl;
     exitHandler(NDBT_FAILED);
   }
...
...
@@ -276,66 +471,108 @@ main(int argc, char** argv)
   const Uint32 version = tmp.NdbVersion;
 
   char buf[NDB_VERSION_STRING_BUF_SZ];
-  ndbout << "Ndb version in backup files: "
-         << getVersionString(version, 0, buf, sizeof(buf)) << endl;
+  info.setLevel(254);
+  info << "Ndb version in backup files: "
+       << getVersionString(version, 0, buf, sizeof(buf)) << endl;
 
   /**
    * check wheater we can restore the backup (right version).
    */
   if (version > NDB_VERSION)
   {
     err << "Restore program older than backup version. Not supported. "
         << "Use new restore program" << endl;
     exitHandler(NDBT_FAILED);
   }
 
+  debug << "Load content" << endl;
   int res = metaData.loadContent();
 
   if (res == 0)
   {
-    ndbout_c("Restore: Failed to load content");
+    err << "Restore: Failed to load content" << endl;
     exitHandler(NDBT_FAILED);
   }
+  debug << "Get no of Tables" << endl;
   if (metaData.getNoOfTables() == 0)
   {
-    ndbout_c("Restore: The backup contains no tables ");
+    err << "The backup contains no tables" << endl;
    exitHandler(NDBT_FAILED);
   }
+  debug << "Validate Footer" << endl;
   if (!metaData.validateFooter())
   {
-    ndbout_c("Restore: Failed to validate footer.");
+    err << "Restore: Failed to validate footer." << endl;
     exitHandler(NDBT_FAILED);
   }
+  debug << "Init Backup objects" << endl;
   Uint32 i;
   for (i= 0; i < g_consumers.size(); i++)
   {
     if (!g_consumers[i]->init())
     {
       clearConsumers();
       err << "Failed to initialize consumers" << endl;
       exitHandler(NDBT_FAILED);
     }
   }
 
+  Vector<OutputStream *> table_output(metaData.getNoOfTables());
+  debug << "Restoring tables" << endl;
   for (i= 0; i < metaData.getNoOfTables(); i++)
   {
-    if (checkSysTable(metaData[i]->getTableName()))
+    const TableS *table= metaData[i];
+    table_output.push_back(NULL);
+    if (!checkDbAndTableName(table))
+      continue;
+    if (checkSysTable(table))
     {
+      if (!tab_path || isBlobTable(table) || isIndex(table))
+      {
+        table_output[i]= ndbout.m_out;
+      }
+      else
+      {
+        FILE* res;
+        char filename[FN_REFLEN], tmp_path[FN_REFLEN];
+        const char *table_name;
+        table_name= table->getTableName();
+        while (*table_name != '/') table_name++;
+        table_name++;
+        while (*table_name != '/') table_name++;
+        table_name++;
+        convert_dirname(tmp_path, tab_path, NullS);
+        res= my_fopen(fn_format(filename, table_name, tmp_path, ".txt", 4),
+                      opt_append ?
+                      O_WRONLY|O_APPEND|O_CREAT :
+                      O_WRONLY|O_TRUNC|O_CREAT,
+                      MYF(MY_WME));
+        if (res == 0)
+        {
+          exitHandler(NDBT_FAILED);
+        }
+        FileOutputStream *f= new FileOutputStream(res);
+        table_output[i]= f;
+      }
       for (Uint32 j= 0; j < g_consumers.size(); j++)
-        if (!g_consumers[j]->table(* metaData[i]))
+        if (!g_consumers[j]->table(* table))
         {
-          ndbout_c("Restore: Failed to restore table: %s. "
-                   "Exiting...",
-                   metaData[i]->getTableName());
+          err << "Restore: Failed to restore table: ";
+          err << table->getTableName() << " ... Exiting " << endl;
           exitHandler(NDBT_FAILED);
         }
     }
   }
+  debug << "Close tables" << endl;
   for (i= 0; i < g_consumers.size(); i++)
     if (!g_consumers[i]->endOfTables())
     {
-      ndbout_c("Restore: Failed while closing tables");
+      err << "Restore: Failed while closing tables" << endl;
      exitHandler(NDBT_FAILED);
     }
+  debug << "Iterate over data" << endl;
   if (ga_restore || ga_print)
   {
     if (_restore_data || _print_data)
...
...
@@ -345,7 +582,7 @@ main(int argc, char** argv)
       // Read data file header
       if (!dataIter.readHeader())
       {
-        ndbout << "Failed to read header of data file. Exiting...";
+        err << "Failed to read header of data file. Exiting..." << endl;
        exitHandler(NDBT_FAILED);
       }
...
...
@@ -355,20 +592,26 @@ main(int argc, char** argv)
        const TupleS* tuple;
        while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
        {
-          if (checkSysTable(tuple->getTable()->getTableName()))
-            for (Uint32 j= 0; j < g_consumers.size(); j++)
-              g_consumers[j]->tuple(* tuple);
+          const TableS* table = tuple->getTable();
+          OutputStream *output = table_output[table->getLocalId()];
+          if (!output)
+            continue;
+          OutputStream *tmp = ndbout.m_out;
+          ndbout.m_out = output;
+          for (Uint32 j= 0; j < g_consumers.size(); j++)
+            g_consumers[j]->tuple(* tuple);
+          ndbout.m_out = tmp;
        } // while (tuple != NULL);
 
        if (res < 0)
        {
-          ndbout_c("Restore: An error occured while restoring data. "
-                   "Exiting...");
+          err << " Restore: An error occured while restoring data. Exiting...";
+          err << endl;
          exitHandler(NDBT_FAILED);
        }
        if (!dataIter.validateFragmentFooter())
        {
-          ndbout_c("Restore: Error validating fragment footer. "
-                   "Exiting...");
+          err << "Restore: Error validating fragment footer. ";
+          err << "Exiting..." << endl;
          exitHandler(NDBT_FAILED);
        }
      } // while (dataIter.readFragmentHeader(res))
...
...
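Aside: the tuple and log-entry loops above redirect ndbout.m_out to the per-table OutputStream before calling the consumers and put the original stream back afterwards. The standalone sketch below mimics that save/redirect/restore idiom with a plain FILE pointer; g_out and consumer_print_tuple are illustrative stand-ins for the purpose of the example, not ndb_restore code.

#include <cstdio>

static FILE* g_out = nullptr;           // stands in for ndbout.m_out

static void consumer_print_tuple(int value) {
  std::fprintf(g_out, "tuple: %d\n", value);  // consumers always write to g_out
}

int main() {
  g_out = stdout;
  FILE* table_file = std::tmpfile();    // stands in for the per-table .txt file
  if (!table_file) return 1;

  FILE* saved = g_out;                  // OutputStream *tmp = ndbout.m_out;
  g_out = table_file;                   // ndbout.m_out = output;
  consumer_print_tuple(42);             // g_consumers[j]->tuple(*tuple);
  g_out = saved;                        // ndbout.m_out = tmp;

  std::fclose(table_file);
  return 0;
}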
@@ -376,7 +619,7 @@ main(int argc, char** argv)
     if (res < 0)
     {
       err << "Restore: An error occured while restoring data. Exiting... "
           << "res=" << res << endl;
       exitHandler(NDBT_FAILED);
     }
...
...
@@ -399,9 +642,12 @@ main(int argc, char** argv)
       const LogEntry * logEntry = 0;
       while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
       {
-        if (checkSysTable(logEntry->m_table->getTableName()))
-          for (Uint32 j= 0; j < g_consumers.size(); j++)
-            g_consumers[j]->logEntry(* logEntry);
+        const TableS* table = logEntry->m_table;
+        OutputStream *output = table_output[table->getLocalId()];
+        if (!output)
+          continue;
+        for (Uint32 j= 0; j < g_consumers.size(); j++)
+          g_consumers[j]->logEntry(* logEntry);
       }
       if (res < 0)
       {
...
...
@@ -418,17 +664,17 @@ main(int argc, char** argv)
   {
     for (i= 0; i < metaData.getNoOfTables(); i++)
     {
-      if (checkSysTable(metaData[i]->getTableName()))
-      {
-        for (Uint32 j= 0; j < g_consumers.size(); j++)
-          if (!g_consumers[j]->finalize_table(*metaData[i]))
-          {
-            ndbout_c("Restore: Failed to finalize restore table: %s. "
-                     "Exiting...",
-                     metaData[i]->getTableName());
-            exitHandler(NDBT_FAILED);
-          }
-      }
+      const TableS* table = metaData[i];
+      OutputStream *output = table_output[table->getLocalId()];
+      if (!output)
+        continue;
+      for (Uint32 j= 0; j < g_consumers.size(); j++)
+        if (!g_consumers[j]->finalize_table(*table))
+        {
+          err << "Restore: Failed to finalize restore table: %s. ";
+          err << "Exiting... " << metaData[i]->getTableName() << endl;
+          exitHandler(NDBT_FAILED);
+        }
     }
   }
 }
...
...
@@ -439,12 +685,27 @@ main(int argc, char** argv)
       clearConsumers();
       ndbout_c("\nRestore successful, but encountered temporary error, "
               "please look at configuration.");
       return NDBT_ProgramExit(NDBT_TEMPORARY);
     }
   }
 
-  clearConsumers();
+  for (i= 0; i < metaData.getNoOfTables(); i++)
+  {
+    if (table_output[i] &&
+        table_output[i] != ndbout.m_out)
+    {
+      my_fclose(((FileOutputStream *)table_output[i])->getFile(), MYF(MY_WME));
+      delete table_output[i];
+      table_output[i] = NULL;
+    }
+  }
+  clearConsumers();
 
-  return NDBT_ProgramExit(NDBT_OK);
+  if (opt_verbose)
+    return NDBT_ProgramExit(NDBT_OK);
+  else
+    return 0;
 } // main
 
 template class Vector<BackupConsumer*>;
+template class Vector<OutputStream*>;