Commit 12ccf6ae authored by Yoni Fogel's avatar Yoni Fogel

refs #5663 Merge #5663 onto main

git-svn-id: file:///svn/toku/tokudb@51238 c7de825b-a66e-492c-adef-691d508d4ae1
parent c4a8d443
......@@ -11,6 +11,9 @@
#include <unistd.h>
#include "memory.h"
#include <string.h>
#include "ftloader-internal.h"
#include "ft-internal.h"
#include "ft.h"
struct dbufio_file {
// i/o thread owns these
......@@ -18,7 +21,7 @@ struct dbufio_file {
// consumers own these
size_t offset_in_buf;
toku_off_t offset_in_file;
toku_off_t offset_in_uncompressed_file;
// need the mutex to modify these
struct dbufio_file *next;
......@@ -49,6 +52,7 @@ struct dbufio_fileset {
size_t bufsize; // the bufsize is the constant (the same for all buffers).
bool panic;
bool compressed;
int panic_errno;
toku_pthread_t iothread;
};
......@@ -75,6 +79,162 @@ static bool paniced (DBUFIO_FILESET bfs) {
return bfs->panic;
}
// Read one compressed block from dbf->fd and decompress it into buf.
//
// On-disk layout of a block (NOTE(review): presumably written by the loader
// when compress_intermediates is enabled — confirm against the writer):
//   uint32  total_size                  -- size of everything that follows
//   uint32  n_sub_blocks
//   n_sub_blocks x { uint32 compressed_size, uint32 uncompressed_size, uint32 xsum }
//   concatenated compressed sub blocks (total_compressed_size bytes)
//
// Returns the number of uncompressed bytes written to buf, 0 on clean EOF
// (no more blocks in the file), or -1 with errno set:
//   TOKUDB_NO_DATA       -- file ended mid-block (truncated header or body)
//   toku_db_badformat()  -- a size field fails sanity checks
//   ENOMEM               -- allocation of the raw block failed
//   other                -- propagated from toku_os_read or decompression
static ssize_t dbf_read_some_compressed(struct dbufio_file *dbf, char *buf, size_t bufsize) {
    ssize_t ret;
    // Caller guarantees room for a maximal uncompressed block.
    invariant(bufsize >= MAX_UNCOMPRESSED_BUF);
    unsigned char *raw_block = NULL;

    // deserialize the sub block header
    // total_size
    // num_sub_blocks
    // compressed_size,uncompressed_size,xsum (repeated num_sub_blocks times)
    ssize_t readcode;
    const uint32_t header_size = sizeof(uint32_t);
    char header[header_size];

    readcode = toku_os_read(dbf->fd, &header, header_size);
    if (readcode < 0) {
        // Real read error; errno was set by toku_os_read.
        ret = -1;
        goto exit;
    }
    if (readcode == 0) {
        // Clean EOF: nothing left in this file.
        ret = 0;
        goto exit;
    }
    if (readcode < header_size) {
        // Partial size field: the file is truncated mid-block.
        errno = TOKUDB_NO_DATA;
        ret = -1;
        goto exit;
    }
    uint32_t total_size;
    {
        uint32_t *p = (uint32_t *) &header[0];
        total_size = toku_dtoh32(p[0]);
    }
    // Sanity-limit total_size (max 1GB) before trusting it as an allocation size.
    if (total_size == 0 || total_size > (1<<30)) {
        errno = toku_db_badformat();
        ret = -1;
        goto exit;
    }

    //Cannot use XMALLOC
    MALLOC_N(total_size, raw_block);
    if (raw_block == nullptr) {
        errno = ENOMEM;
        ret = -1;
        goto exit;
    }
    readcode = toku_os_read(dbf->fd, raw_block, total_size);
    if (readcode < 0) {
        // Read error on the block body; errno set by toku_os_read.
        ret = -1;
        goto exit;
    }
    if (readcode < total_size) {
        // Body shorter than advertised: truncated file.
        errno = TOKUDB_NO_DATA;
        ret = -1;
        goto exit;
    }

    struct sub_block sub_block[max_sub_blocks];
    uint32_t *sub_block_header;
    sub_block_header = (uint32_t *) &raw_block[0];
    int32_t n_sub_blocks;
    n_sub_blocks = toku_dtoh32(sub_block_header[0]);
    sub_block_header++;
    size_t size_subblock_header;
    size_subblock_header = sub_block_header_size(n_sub_blocks);
    // Validate the sub-block count and header size before walking the
    // per-sub-block entries below.
    if (n_sub_blocks == 0 || n_sub_blocks > max_sub_blocks || size_subblock_header > total_size) {
        errno = toku_db_badformat();
        ret = -1;
        goto exit;
    }
    for (int i = 0; i < n_sub_blocks; i++) {
        sub_block_init(&sub_block[i]);
        sub_block[i].compressed_size = toku_dtoh32(sub_block_header[0]);
        sub_block[i].uncompressed_size = toku_dtoh32(sub_block_header[1]);
        sub_block[i].xsum = toku_dtoh32(sub_block_header[2]);
        sub_block_header += 3;
    }

    // verify sub block sizes
    size_t total_compressed_size;
    total_compressed_size = 0;
    for (int i = 0; i < n_sub_blocks; i++) {
        uint32_t compressed_size = sub_block[i].compressed_size;
        if (compressed_size<=0 || compressed_size>(1<<30)) {
            errno = toku_db_badformat();
            ret = -1;
            goto exit;
        }

        uint32_t uncompressed_size = sub_block[i].uncompressed_size;
        if (uncompressed_size<=0 || uncompressed_size>(1<<30)) {
            errno = toku_db_badformat();
            ret = -1;
            goto exit;
        }
        total_compressed_size += compressed_size;
    }
    // The block must account for exactly the header plus the compressed payload.
    if (total_size != total_compressed_size + size_subblock_header) {
        errno = toku_db_badformat();
        ret = -1;
        goto exit;
    }

    // sum up the uncompressed size of the sub blocks
    size_t uncompressed_size;
    uncompressed_size = get_sum_uncompressed_size(n_sub_blocks, sub_block);
    // The result must fit both the caller's buffer and the fixed per-block cap.
    if (uncompressed_size > bufsize || uncompressed_size > MAX_UNCOMPRESSED_BUF) {
        errno = toku_db_badformat();
        ret = -1;
        goto exit;
    }

    unsigned char *uncompressed_data;
    uncompressed_data = (unsigned char *)buf;

    // point at the start of the compressed data (past the node header, the sub block header, and the header checksum)
    unsigned char *compressed_data;
    compressed_data = raw_block + size_subblock_header;

    // decompress all the compressed sub blocks into the uncompressed buffer
    {
        int r;
        r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, get_num_cores(), get_ft_pool());
        if (r != 0) {
            // Decompression/checksum failure: log and dump raw bytes for forensics.
            fprintf(stderr, "%s:%d loader failed %d at %p size %" PRIu32"\n", __FUNCTION__, __LINE__, r, raw_block, total_size);
            dump_bad_block(raw_block, total_size);
            errno = r;
            ret = -1;
            goto exit;
        }
    }
    ret = uncompressed_size;
exit:
    if (raw_block) {
        toku_free(raw_block);
    }
    return ret;
}
// Fill buf with as many whole decompressed blocks as fit.
// Stops when there is no longer room for a maximal block, at EOF, or on
// error. Returns the total uncompressed byte count, or a negative value
// (with errno set by dbf_read_some_compressed) on failure.
static ssize_t dbf_read_compressed(struct dbufio_file *dbf, char *buf, size_t bufsize) {
    invariant(bufsize >= MAX_UNCOMPRESSED_BUF);
    size_t total = 0;
    for (;;) {
        if (total + MAX_UNCOMPRESSED_BUF > bufsize) {
            break;  // no guaranteed room for another full block
        }
        ssize_t n = dbf_read_some_compressed(dbf, buf + total, bufsize - total);
        if (n < 0) {
            return n;  // propagate the error to the caller
        }
        if (n == 0) {
            break;     // EOF
        }
        total += n;
    }
    return total;
}
static void* io_thread (void *v)
// The dbuf_thread does all the asynchronous I/O.
{
......@@ -118,7 +278,13 @@ static void* io_thread (void *v)
toku_mutex_unlock(&bfs->mutex);
//printf("%s:%d Doing read fd=%d\n", __FILE__, __LINE__, dbf->fd);
{
ssize_t readcode = toku_os_read(dbf->fd, dbf->buf[1], bfs->bufsize);
ssize_t readcode;
if (bfs->compressed) {
readcode = dbf_read_compressed(dbf, dbf->buf[1], bfs->bufsize);
}
else {
readcode = toku_os_read(dbf->fd, dbf->buf[1], bfs->bufsize);
}
//printf("%s:%d readcode=%ld\n", __FILE__, __LINE__, readcode);
if (readcode==-1) {
// a real error. Save the real error.
......@@ -159,11 +325,14 @@ static void* io_thread (void *v)
}
}
int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize) {
int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize, bool compressed) {
//printf("%s:%d here\n", __FILE__, __LINE__);
int result = 0;
DBUFIO_FILESET CALLOC(bfs);
if (bfs==0) { result = get_error_errno(); }
bfs->compressed = compressed;
bool mutex_inited = false, cond_inited = false;
if (result==0) {
CALLOC_N(N, bfs->files);
......@@ -190,7 +359,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
for (int i=0; i<N; i++) {
bfs->files[i].fd = fds[i];
bfs->files[i].offset_in_buf = 0;
bfs->files[i].offset_in_file = 0;
bfs->files[i].offset_in_uncompressed_file = 0;
bfs->files[i].next = NULL;
bfs->files[i].second_buf_ready = false;
for (int j=0; j<2; j++) {
......@@ -202,12 +371,18 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
bfs->files[i].error_code[j] = 0;
}
bfs->files[i].io_done = false;
{
ssize_t r = toku_os_read(bfs->files[i].fd, bfs->files[i].buf[0], bufsize);
ssize_t r;
if (bfs->compressed) {
r = dbf_read_compressed(&bfs->files[i], bfs->files[i].buf[0], bufsize);
} else {
//TODO: 5663 If compressed need to read differently
r = toku_os_read(bfs->files[i].fd, bfs->files[i].buf[0], bufsize);
}
{
if (r<0) {
result=get_error_errno();
break;
} else if (r==0) {
} else if (r==0) {
// it's EOF
bfs->files[i].io_done = true;
bfs->n_not_done--;
......@@ -297,7 +472,7 @@ int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t co
// Enough data is present to do it all now
memcpy(buf, dbf->buf[0]+dbf->offset_in_buf, count);
dbf->offset_in_buf += count;
dbf->offset_in_file += count;
dbf->offset_in_uncompressed_file += count;
*n_read = count;
return 0;
} else if (dbf->n_in_buf[0] > dbf->offset_in_buf) {
......@@ -306,7 +481,7 @@ int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t co
assert(dbf->offset_in_buf + this_count <= bfs->bufsize);
memcpy(buf, dbf->buf[0]+dbf->offset_in_buf, this_count);
dbf->offset_in_buf += this_count;
dbf->offset_in_file += this_count;
dbf->offset_in_uncompressed_file += this_count;
size_t sub_n_read;
int r = dbufio_fileset_read(bfs, filenum, buf+this_count, count-this_count, &sub_n_read);
if (r==0) {
......
......@@ -14,7 +14,7 @@
/* An implementation would typically use a separate thread or asynchronous I/O to fetch ahead data for each file. The system will typically fill two buffers of size M for each file. One buffer is being read out of using dbuf_read(), and the other buffer is either empty (waiting on the asynchronous I/O to start), being filled in by the asynchronous I/O mechanism, or is waiting for the caller to read data from it. */
typedef struct dbufio_fileset *DBUFIO_FILESET;
int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize);
int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize, bool compressed);
int destroy_dbufio_fileset(DBUFIO_FILESET);
......
......@@ -110,4 +110,7 @@ void toku_ft_set_blackhole(FT_HANDLE ft_handle);
// The difference between the two is MVCC garbage.
void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space);
int get_num_cores(void);
struct toku_thread_pool *get_ft_pool(void);
void dump_bad_block(unsigned char *vp, uint64_t size);
#endif
......@@ -10,6 +10,7 @@
#include <portability/toku_atomic.h>
#include <util/sort.h>
#include <util/threadpool.h>
#include "ft.h"
static FT_UPGRADE_STATUS_S ft_upgrade_status;
......@@ -57,6 +58,14 @@ static inline void do_toku_trace(const char *cp, int len) {
static int num_cores = 0; // cache the number of cores for the parallelization
static struct toku_thread_pool *ft_pool = NULL;
// Accessor for the file-local cached core count (populated by
// toku_ft_serialize_layer_init via toku_os_get_number_active_processors).
// NOTE(review): returns 0 if called before the layer is initialized.
int get_num_cores(void) {
return num_cores;
}
// Accessor for the file-local ft thread pool used for parallel
// (de)serialization work. NOTE(review): returns NULL if called before
// toku_ft_serialize_layer_init has run.
struct toku_thread_pool *get_ft_pool(void) {
return ft_pool;
}
void
toku_ft_serialize_layer_init(void) {
num_cores = toku_os_get_number_active_processors();
......@@ -980,7 +989,7 @@ deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
// dump a buffer to stderr
// no locking around this for now
static void
void
dump_bad_block(unsigned char *vp, uint64_t size) {
const uint64_t linesize = 64;
uint64_t n = size / linesize;
......@@ -1181,14 +1190,7 @@ read_and_decompress_sub_block(struct rbuf *rb, struct sub_block *sb)
goto exit;
}
sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
toku_decompress(
(Bytef *) sb->uncompressed_ptr,
sb->uncompressed_size,
(Bytef *) sb->compressed_ptr,
sb->compressed_size
);
just_decompress_sub_block(sb);
exit:
return r;
}
......
......@@ -12,6 +12,19 @@
#include <toku_pthread.h>
#include "dbufio.h"
// Loader tuning constants. NOTE(review): values are heuristic tuning knobs;
// confirm intended semantics against the loader implementation before changing.
enum { EXTRACTOR_QUEUE_DEPTH = 2,
FILE_BUFFER_SIZE = 1<<24,       // 16MB per-file buffer
MIN_ROWSET_MEMORY = 1<<23,      // 8MB minimum per rowset
MIN_MERGE_FANIN = 2,            // merge at least two inputs at a time
FRACTAL_WRITER_QUEUE_DEPTH = 3,
FRACTAL_WRITER_ROWSETS = FRACTAL_WRITER_QUEUE_DEPTH + 2,
DBUFIO_DEPTH = 2,
TARGET_MERGE_BUF_SIZE = 1<<24, // we'd like the merge buffer to be this big.
MIN_MERGE_BUF_SIZE = 1<<20, // always use at least this much
MAX_UNCOMPRESSED_BUF = MIN_MERGE_BUF_SIZE // cap on one decompressed dbufio block
};
/* These functions are exported to allow the tests to compile. */
/* These structures maintain a collection of all the open temporary files used by the loader. */
......@@ -56,7 +69,7 @@ int init_rowset (struct rowset *rows, uint64_t memory_budget);
void destroy_rowset (struct rowset *rows);
int add_row (struct rowset *rows, DBT *key, DBT *val);
int loader_write_row(DBT *key, DBT *val, FIDX data, FILE*, uint64_t *dataoff, FTLOADER bl);
int loader_write_row(DBT *key, DBT *val, FIDX data, FILE*, uint64_t *dataoff, struct wbuf *wb, FTLOADER bl);
int loader_read_row (FILE *f, DBT *key, DBT *val);
struct merge_fileset {
......@@ -146,6 +159,7 @@ struct ft_loader_s {
CACHETABLE cachetable;
bool did_reserve_memory;
bool compress_intermediates;
uint64_t reserved_memory; // how much memory are we allowed to use?
/* To make it easier to recover from errors, we don't use FILE*, instead we use an index into the file_infos. */
......@@ -243,7 +257,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
bool reserve_memory);
bool reserve_memory,
bool compress_intermediates);
void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error);
......
This diff is collapsed.
......@@ -26,7 +26,8 @@ int toku_ft_loader_open (FTLOADER *bl,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
bool reserve_memory);
bool reserve_memory,
bool compress_intermediates);
int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val);
......
......@@ -44,7 +44,7 @@ sub_block_header_size(int n_sub_blocks);
void
set_compressed_size_bound(struct sub_block *se, enum toku_compression_method method);
// get the sum of the sub block compressed sizes
// get the sum of the sub block compressed bound sizes
size_t
get_sum_compressed_size_bound(int n_sub_blocks, struct sub_block sub_block[], enum toku_compression_method method);
......
......@@ -42,7 +42,7 @@ static void test1 (size_t chars_per_file, size_t UU(bytes_per_read)) {
}
DBUFIO_FILESET bfs;
{
int r = create_dbufio_fileset(&bfs, N, fds, M);
int r = create_dbufio_fileset(&bfs, N, fds, M, false);
assert(r==0);
}
......
......@@ -41,7 +41,7 @@ static void test1 (size_t chars_per_file, size_t bytes_per_read) {
}
DBUFIO_FILESET bfs;
{
int r = create_dbufio_fileset(&bfs, N, fds, M);
int r = create_dbufio_fileset(&bfs, N, fds, M, false);
assert(r==0);
}
while (n_live>0) {
......
......@@ -87,7 +87,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail) {
}
FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true);
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true, false);
assert(r == 0);
struct rowset *rowset[nrowsets];
......
......@@ -99,7 +99,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char
sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true);
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true, false);
assert(r == 0);
struct rowset *rowset[nrowsets];
......
......@@ -319,7 +319,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) {
sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader;
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, temp, ZERO_LSN, TXNID_NONE, true);
r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, temp, ZERO_LSN, TXNID_NONE, true, false);
assert(r == 0);
struct rowset *rowset[nrowsets];
......
......@@ -326,7 +326,7 @@ static void test (const char *directory, bool is_error) {
bt_compare_functions,
"tempxxxxxx",
*lsnp,
TXNID_NONE, true);
TXNID_NONE, true, false);
assert(r==0);
}
......@@ -340,7 +340,7 @@ static void test (const char *directory, bool is_error) {
{ int r = queue_create(&q, 1000); assert(r==0); }
DBUFIO_FILESET bfs;
const int MERGE_BUF_SIZE = 100000; // bigger than 64K so that we will trigger malloc issues.
{ int r = create_dbufio_fileset(&bfs, N_SOURCES, fds, MERGE_BUF_SIZE); assert(r==0); }
{ int r = create_dbufio_fileset(&bfs, N_SOURCES, fds, MERGE_BUF_SIZE, false); assert(r==0); }
FIDX *XMALLOC_N(N_SOURCES, src_fidxs);
assert(bl->file_infos.n_files==0);
bl->file_infos.n_files = N_SOURCES;
......
......@@ -57,7 +57,7 @@ static void test_loader_open(int ndbs) {
for (i = 0; ; i++) {
set_my_malloc_trigger(i+1);
r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, brts, dbs, fnames, compares, "", ZERO_LSN, TXNID_NONE, true);
r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, brts, dbs, fnames, compares, "", ZERO_LSN, TXNID_NONE, true, false);
if (r == 0)
break;
}
......
......@@ -187,7 +187,7 @@ static void test_read_write_rows (char *tf_template) {
toku_fill_dbt(&key, keystrings[i], strlen(keystrings[i]));
DBT val;
toku_fill_dbt(&val, valstrings[i], strlen(valstrings[i]));
r = loader_write_row(&key, &val, file, toku_bl_fidx2file(&bl, file), &dataoff, &bl);
r = loader_write_row(&key, &val, file, toku_bl_fidx2file(&bl, file), &dataoff, nullptr, &bl);
CKERR(r);
actual_size+=key.size + val.size + 8;
}
......
......@@ -430,3 +430,8 @@ int toku_fsync_directory(const char *fname) {
toku_free(dirname);
return result;
}
// Portability shim: open an in-memory stream over buf[0..size).
// Semantics are exactly those of POSIX fmemopen(3); returns NULL with
// errno set on failure.
FILE *toku_os_fmemopen(void *buf, size_t size, const char *mode) {
    FILE *stream = fmemopen(buf, size, mode);
    return stream;
}
......@@ -174,6 +174,7 @@ toku_loader_create_loader(DB_ENV *env,
DB_LOADER *loader = NULL;
bool puts_allowed = !(loader_flags & LOADER_DISALLOW_PUTS);
bool compress_intermediates = (loader_flags & LOADER_COMPRESS_INTERMEDIATES) != 0;
XCALLOC(loader); // init to all zeroes (thus initializing the error_callback and poll_func)
XCALLOC(loader->i); // init to all zeroes (thus initializing all pointers to NULL)
......@@ -252,7 +253,8 @@ toku_loader_create_loader(DB_ENV *env,
loader->i->temp_file_template,
load_lsn,
ttxn,
puts_allowed);
puts_allowed,
compress_intermediates);
if ( rval!=0 ) {
toku_free(new_inames_in_env);
toku_free(brts);
......
......@@ -546,14 +546,22 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
add_test(ydb/loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s -e dir.loader-stress-test2.tdb)
add_test(ydb/loader-stress-test3.tdb loader-stress-test.tdb -u -c -e dir.loader-stress-test3.tdb)
add_test(ydb/loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c -e dir.loader-stress-test4.tdb)
add_test(ydb/loader-stress-test5.tdb loader-stress-test.tdb -c -z -e dir.loader-stress-test5.tdb)
add_test(ydb/loader-stress-test0z.tdb loader-stress-test.tdb -c -e dir.loader-stress-test0z.tdb -z)
add_test(ydb/loader-stress-test1z.tdb loader-stress-test.tdb -c -p -e dir.loader-stress-test1z.tdb -z)
add_test(ydb/loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -e dir.loader-stress-test2z.tdb -z)
add_test(ydb/loader-stress-test3z.tdb loader-stress-test.tdb -u -c -e dir.loader-stress-test3z.tdb -z)
add_test(ydb/loader-stress-test4z.tdb loader-stress-test.tdb -r 10000000 -c -e dir.loader-stress-test4z.tdb -z)
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES
dir.loader-stress-test0.tdb
dir.loader-stress-test1.tdb
dir.loader-stress-test2.tdb
dir.loader-stress-test3.tdb
dir.loader-stress-test4.tdb
dir.loader-stress-test5.tdb
dir.loader-stress-test0z.tdb
dir.loader-stress-test1z.tdb
dir.loader-stress-test2z.tdb
dir.loader-stress-test3z.tdb
dir.loader-stress-test4z.tdb
)
list(REMOVE_ITEM loader_tests loader-dup-test.loader)
......@@ -563,6 +571,12 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
add_test(ydb/loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100 -e dir.loader-dup-test3.tdb)
add_test(ydb/loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000 -e dir.loader-dup-test4.tdb)
add_test(ydb/loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -e dir.loader-dup-test5.tdb)
add_test(ydb/loader-dup-test0z.tdb loader-dup-test.tdb -e dir.loader-dup-test0z.tdb -z)
add_test(ydb/loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -e dir.loader-dup-test1z.tdb -z)
add_test(ydb/loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -e dir.loader-dup-test2z.tdb -z)
add_test(ydb/loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -e dir.loader-dup-test3z.tdb -z)
add_test(ydb/loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -e dir.loader-dup-test4z.tdb -z)
add_test(ydb/loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -e dir.loader-dup-test5z.tdb -z)
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES
dir.loader-dup-test0.tdb
dir.loader-dup-test1.tdb
......@@ -570,6 +584,12 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
dir.loader-dup-test3.tdb
dir.loader-dup-test4.tdb
dir.loader-dup-test5.tdb
dir.loader-dup-test0z.tdb
dir.loader-dup-test1z.tdb
dir.loader-dup-test2z.tdb
dir.loader-dup-test3z.tdb
dir.loader-dup-test4z.tdb
dir.loader-dup-test5z.tdb
)
## as part of #4503, we took out test 1 and 3
......@@ -578,6 +598,8 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
#add_test(ydb/loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p -e dir.loader-cleanup-test1.tdb)
add_test(ydb/loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000 -e dir.loader-cleanup-test2.tdb)
#add_test(ydb/loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p -e dir.loader-cleanup-test3.tdb)
add_test(ydb/loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -e dir.loader-cleanup-test0z.tdb -z)
add_test(ydb/loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -e dir.loader-cleanup-test2z.tdb -z)
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES
dir.loader-cleanup-test0.tdb
dir.loader-cleanup-test1.tdb
......@@ -606,11 +628,15 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
)
declare_custom_tests(maxsize-for-loader.tdb)
add_test(ydb/maxsize-for-loader-A.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-A.tdb -f)
add_test(ydb/maxsize-for-loader-B.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-B.tdb)
add_test(ydb/maxsize-for-loader-A.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-A.tdb -f -c)
add_test(ydb/maxsize-for-loader-B.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-B.tdb -c)
add_test(ydb/maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-Az.tdb -f -z -c)
add_test(ydb/maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -e dir.maxsize-for-loader-Bz.tdb -z -c)
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES
dir.maxsize-for-loader-A.tdb
dir.maxsize-for-loader-B.tdb
dir.maxsize-for-loader-Az.tdb
dir.maxsize-for-loader-Bz.tdb
)
declare_custom_tests(hotindexer-undo-do-test.tdb)
......@@ -667,6 +693,8 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
ydb/env-put-multiple.tdb
ydb/filesize.tdb
ydb/loader-cleanup-test0.tdb
ydb/loader-cleanup-test0z.tdb
ydb/loader-cleanup-test2z.tdb
ydb/manyfiles.tdb
ydb/recover-loader-test.abortrecover
ydb/recovery_fileops_stress.tdb
......@@ -712,6 +740,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
ydb/loader-stress-del.nop.loader
ydb/loader-stress-del.p.loader
ydb/loader-stress-del.comp.loader
ydb/loader-stress-test4z.tdb
ydb/test3039.tdb
ydb/test3529.tdb
ydb/test_update_stress.tdb
......
This diff is collapsed.
......@@ -266,6 +266,7 @@ int toku_os_close(int fd);
int toku_os_fclose(FILE * stream);
ssize_t toku_os_read(int fd, void *buf, size_t count);
ssize_t toku_os_pread(int fd, void *buf, size_t count, off_t offset);
FILE *toku_os_fmemopen(void *buf, size_t size, const char *mode);
// wrapper around fsync
void toku_file_fsync_without_accounting(int fd);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment