Commit 4d74fb5c authored by Leif Walsh, committed by Yoni Fogel

[t:4570] finishing the enum->int change for checksum errors

git-svn-id: file:///svn/toku/tokudb@44326 c7de825b-a66e-492c-adef-691d508d4ae1
parent 4ba7bcad
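For context: this change retires the three-valued enum deserialize_error_code (DS_OK, DS_XSUM_FAIL, DS_ERRNO) and has the deserializers return a plain int instead: 0 on success, TOKUDB_BAD_CHECKSUM on a checksum mismatch, or an errno-style value for other failures. Below is a minimal sketch of the resulting caller-side pattern; demo_deserialize() and the numeric value given to TOKUDB_BAD_CHECKSUM are placeholders for illustration only, not code from the tree.

    /* Sketch only: placeholder names and values, not from the TokuDB sources. */
    #include <stdio.h>

    #define TOKUDB_BAD_CHECKSUM (-100015)   /* placeholder value for this sketch */

    /* Hypothetical deserializer following the new convention. */
    static int demo_deserialize(int simulate_checksum_failure) {
        if (simulate_checksum_failure) {
            return TOKUDB_BAD_CHECKSUM;     /* was: e = DS_XSUM_FAIL */
        }
        return 0;                           /* was: e = DS_OK */
    }

    int main(void) {
        int r = demo_deserialize(1);
        if (r == TOKUDB_BAD_CHECKSUM) {     /* was: if (e == DS_XSUM_FAIL) */
            fprintf(stderr, "checksum failure\n");
        } else if (r != 0) {                /* was: if (e == DS_ERRNO); r now carries the errno value */
            fprintf(stderr, "error, errno = %d\n", r);
        }
        return 0;
    }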
@@ -778,12 +778,12 @@ translation_default(struct translation *t) { // destination into which to creat
 }
-static enum deserialize_error_code
+static int
 translation_deserialize_from_buffer(struct translation *t,    // destination into which to deserialize
                                     DISKOFF location_on_disk, //Location of translation_buffer
                                     u_int64_t size_on_disk,
                                     unsigned char * translation_buffer) {    // buffer with serialized translation
-    enum deserialize_error_code e;
+    int r = 0;
     assert(location_on_disk!=0);
     t->type = TRANSLATION_CHECKPOINTED;
     {
@@ -794,7 +794,7 @@ translation_deserialize_from_buffer(struct translation *t, // destination int
         u_int32_t stored_x1764 = toku_dtoh32(*(int*)(translation_buffer + offset));
         if (x1764 != stored_x1764) {
             fprintf(stderr, "Translation table checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764);
-            e = DS_XSUM_FAIL;
+            r = TOKUDB_BAD_CHECKSUM;
             goto exit;
         }
     }
@@ -817,9 +817,8 @@ PRNTF("ReadIn", i, t->block_translation[i].size, t->block_translation[i].u.disko
     assert(calculate_size_on_disk(t) == (int64_t)size_on_disk);
     assert(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size == (int64_t)size_on_disk);
     assert(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff == location_on_disk);
-    e = DS_OK;
 exit:
-    return e;
+    return r;
 }
 // We just initialized a translation, inform block allocator to reserve space for each blocknum in use.
@@ -848,15 +847,15 @@ blocktable_note_translation (BLOCK_ALLOCATOR allocator, struct translation *t) {
 // The one read from disk is the last known checkpointed one, so we are keeping it in
 // place and then setting current (which is never stored on disk) for current use.
 // The translation_buffer has translation only, we create the rest of the block_table.
-enum deserialize_error_code
+int
 toku_blocktable_create_from_buffer(int fd,
                                    BLOCK_TABLE *btp,
                                    DISKOFF location_on_disk, //Location of translation_buffer
                                    DISKOFF size_on_disk,
                                    unsigned char *translation_buffer) {
     BLOCK_TABLE bt = blocktable_create_internal();
-    enum deserialize_error_code e = translation_deserialize_from_buffer(&bt->checkpointed, location_on_disk, size_on_disk, translation_buffer);
-    if (e != DS_OK) {
+    int r = translation_deserialize_from_buffer(&bt->checkpointed, location_on_disk, size_on_disk, translation_buffer);
+    if (r != 0) {
         goto exit;
     }
     blocktable_note_translation(bt->block_allocator, &bt->checkpointed);
@@ -864,14 +863,14 @@ toku_blocktable_create_from_buffer(int fd,
     copy_translation(&bt->current, &bt->checkpointed, TRANSLATION_CURRENT);
     int64_t file_size;
-    int r = toku_os_get_file_size(fd, &file_size);
+    r = toku_os_get_file_size(fd, &file_size);
     lazy_assert_zero(r);
     invariant(file_size >= 0);
     bt->safe_file_size = file_size;
     *btp = bt;
 exit:
-    return e;
+    return r;
 }
...
@@ -24,7 +24,7 @@ struct block_translation_pair {
 };
 void toku_blocktable_create_new(BLOCK_TABLE *btp);
-enum deserialize_error_code toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
+int toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
 void toku_blocktable_destroy(BLOCK_TABLE *btp);
 void toku_ft_lock(FT h);
...
@@ -529,10 +529,9 @@ deserialize_ft_from_fd_into_rbuf(int fd,
                                  struct rbuf *rb,
                                  u_int64_t *checkpoint_count,
                                  LSN *checkpoint_lsn,
-                                 u_int32_t * version_p,
-                                 enum deserialize_error_code *e);
+                                 u_int32_t * version_p);
-enum deserialize_error_code
+int
 deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
 int
@@ -593,7 +592,7 @@ int toku_serialize_ft_to_wbuf (
     DISKOFF translation_location_on_disk,
     DISKOFF translation_size_on_disk
     );
-enum deserialize_error_code toku_deserialize_ft_from (int fd, LSN max_acceptable_lsn, FT *ft);
+int toku_deserialize_ft_from (int fd, LSN max_acceptable_lsn, FT *ft);
 int toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset);
 void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc);
 BASEMENTNODE toku_create_empty_bn(void);
...
@@ -774,16 +774,13 @@ int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM nodena
     // deserialize the node, must pass the bfe in because we cannot
     // evaluate what piece of the the node is necessary until we get it at
     // least partially into memory
-    enum deserialize_error_code e;
-    int r = 0;
-    e = toku_deserialize_ftnode_from(fd, nodename, fullhash, node, ndd, bfe);
-    if (e != DS_OK) {
-        if (e == DS_XSUM_FAIL) {
+    int r = toku_deserialize_ftnode_from(fd, nodename, fullhash, node, ndd, bfe);
+    if (r != 0) {
+        if (r == TOKUDB_BAD_CHECKSUM) {
             fprintf(stderr,
                     "Checksum failure while reading node in file %s.\n",
                     toku_cachefile_fname_in_env(cachefile));
-        } else if (e == DS_ERRNO) {
-            r = errno;
+        } else {
             fprintf(stderr, "Error deserializing node, errno = %d", r);
         }
         // make absolutely sure we crash before doing anything else.
@@ -1091,7 +1088,7 @@ ft_status_update_partial_fetch_reason(
 // callback for partially reading a node
 // could have just used toku_ftnode_fetch_callback, but wanted to separate the two cases to separate functions
 int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraargs, int fd, PAIR_ATTR* sizep) {
-    enum deserialize_error_code e = DS_OK;
+    int r = 0;
     FTNODE node = ftnode_pv;
     FTNODE_DISK_DATA ndd = disk_data;
     struct ftnode_fetch_extra *bfe = read_extraargs;
@@ -1119,22 +1116,22 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar
         if ((lc <= i && i <= rc) || toku_bfe_wants_child_available(bfe, i)) {
             ft_status_update_partial_fetch_reason(bfe, i, BP_STATE(node, i), (node->height == 0));
             if (BP_STATE(node,i) == PT_COMPRESSED) {
-                e = toku_deserialize_bp_from_compressed(node, i, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
+                r = toku_deserialize_bp_from_compressed(node, i, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
             }
             else if (BP_STATE(node,i) == PT_ON_DISK) {
-                e = toku_deserialize_bp_from_disk(node, ndd, i, fd, bfe);
+                r = toku_deserialize_bp_from_disk(node, ndd, i, fd, bfe);
             }
             else {
                 assert(FALSE);
             }
         }
-        if (e != DS_OK) {
-            if (e == DS_XSUM_FAIL) {
+        if (r != 0) {
+            if (r == TOKUDB_BAD_CHECKSUM) {
                 fprintf(stderr,
                         "Checksum failure while reading node partition in file %s.\n",
                         toku_cachefile_fname_in_env(bfe->h->cf));
-            } else if (e == DS_ERRNO) {
+            } else {
                 fprintf(stderr,
                         "Error while reading node partition %d\n",
                         errno);
...
@@ -90,9 +90,9 @@ deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, int layout_ve
     toku_fill_dbt(&desc->dbt, data_copy, size);
 }
-static enum deserialize_error_code
+static int
 deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_version) {
-    enum deserialize_error_code e;
+    int r = 0;
     DISKOFF offset;
     DISKOFF size;
     unsigned char *dbuf = NULL;
@@ -103,8 +103,8 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_
     {
         XMALLOC_N(size, dbuf);
         {
-            ssize_t r = toku_os_pread(fd, dbuf, size, offset);
-            lazy_assert(r==size);
+            ssize_t sz_read = toku_os_pread(fd, dbuf, size, offset);
+            lazy_assert(sz_read==size);
         }
         {
             // check the checksum
@@ -113,7 +113,7 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_
             u_int32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4));
             if (x1764 != stored_x1764) {
                 fprintf(stderr, "Descriptor checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764);
-                e = DS_XSUM_FAIL;
+                r = TOKUDB_BAD_CHECKSUM;
                 toku_free(dbuf);
                 goto exit;
             }
@@ -127,16 +127,15 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_
             toku_free(dbuf);
         }
     }
-    e = DS_OK;
 exit:
-    return e;
+    return r;
 }
 // We only deserialize brt header once and then share everything with all the brts.
-enum deserialize_error_code
+int
 deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
 {
-    enum deserialize_error_code e = DS_OK;
+    int r;
     FT ft = NULL;
     invariant(version >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
     invariant(version <= FT_LAYOUT_VERSION);
@@ -152,14 +151,14 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     XCALLOC(ft);
     if (!ft) {
-        e = DS_ERRNO;
+        r = errno;
         goto exit;
     }
     ft->checkpoint_header = NULL;
     ft->panic = 0;
     ft->panic_string = 0;
     toku_list_init(&ft->live_ft_handles);
-    int r = toku_omt_create(&ft->txns);
+    r = toku_omt_create(&ft->txns);
     assert_zero(r);
     //version MUST be in network order on disk regardless of disk order
@@ -204,13 +203,13 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
             lazy_assert(readsz == translation_size_on_disk);
         }
         // Create table and read in data.
-        e = toku_blocktable_create_from_buffer(fd,
+        r = toku_blocktable_create_from_buffer(fd,
                                                &ft->blocktable,
                                                translation_address_on_disk,
                                                translation_size_on_disk,
                                                tbuf);
         toku_free(tbuf);
-        if (e != DS_OK) {
+        if (r != 0) {
             goto exit;
         }
     }
@@ -283,8 +282,7 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     (void) rbuf_int(rb); //Read in checksum and ignore (already verified).
     if (rb->ndone != rb->size) {
         fprintf(stderr, "Header size did not match contents.\n");
-        errno = EINVAL;
-        e = DS_ERRNO;
+        r = EINVAL;
         goto exit;
     }
@@ -319,15 +317,15 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_18) {
         // This needs ft->h to be non-null, so we have to do it after we
         // read everything else.
-        e = toku_upgrade_subtree_estimates_to_stat64info(fd, ft);
-        if (e != DS_OK) {
+        r = toku_upgrade_subtree_estimates_to_stat64info(fd, ft);
+        if (r != 0) {
             goto exit;
         }
     }
     invariant((uint32_t) ft->layout_version_read_from_disk == version);
-    e = deserialize_descriptor_from(fd, ft->blocktable, &ft->descriptor, version);
-    if (e != DS_OK) {
+    r = deserialize_descriptor_from(fd, ft->blocktable, &ft->descriptor, version);
+    if (r != 0) {
         goto exit;
     }
     // copy descriptor to cmp_descriptor for #4541
@@ -338,20 +336,19 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     // version if it gets written out, we need to write the descriptor in
     // the new format (without those bytes) before that happens.
     if (version <= FT_LAYOUT_VERSION_13) {
         r = toku_update_descriptor(ft, &ft->cmp_descriptor, fd);
         if (r != 0) {
-            errno = r;
-            e = DS_ERRNO;
             goto exit;
         }
     }
+    r = 0;
 exit:
-    if (e != DS_OK && ft != NULL) {
+    if (r != 0 && ft != NULL) {
         toku_free(ft);
         ft = NULL;
     }
     *ftp = ft;
-    return e;
+    return r;
 }
 static u_int32_t
@@ -419,12 +416,11 @@ serialize_ft_min_size (u_int32_t version) {
 // file AND the header is useless
 int
 deserialize_ft_from_fd_into_rbuf(int fd,
                                  toku_off_t offset_of_header,
                                  struct rbuf *rb,
                                  u_int64_t *checkpoint_count,
                                  LSN *checkpoint_lsn,
-                                 u_int32_t * version_p,
-                                 enum deserialize_error_code *e)
+                                 u_int32_t * version_p)
 {
     int r = 0;
     const int64_t prefix_size = 8 + // magic ("tokudata")
@@ -508,9 +504,8 @@ deserialize_ft_from_fd_into_rbuf(int fd,
     u_int32_t calculated_x1764 = x1764_memory(rb->buf, rb->size-4);
     u_int32_t stored_x1764 = toku_dtoh32(*(int*)(rb->buf+rb->size-4));
     if (calculated_x1764 != stored_x1764) {
-        r = TOKUDB_DICTIONARY_NO_HEADER; //Header useless
+        r = TOKUDB_BAD_CHECKSUM; //Header useless
         fprintf(stderr, "Header checksum failure: calc=0x%08x read=0x%08x\n", calculated_x1764, stored_x1764);
-        *e = DS_XSUM_FAIL;
         goto exit;
     }
@@ -543,10 +538,10 @@ exit:
 // Read ft from file into struct. Read both headers and use one.
 // We want the latest acceptable header whose checkpoint_lsn is no later
 // than max_acceptable_lsn.
-enum deserialize_error_code
+int
 toku_deserialize_ft_from(int fd,
                          LSN max_acceptable_lsn,
                          FT *ft)
 {
     struct rbuf rb_0;
     struct rbuf rb_1;
@@ -559,18 +554,15 @@ toku_deserialize_ft_from(int fd,
     BOOL h1_acceptable = FALSE;
     struct rbuf *rb = NULL;
     int r0, r1, r;
-    enum deserialize_error_code e0, e1, e;
     toku_off_t header_0_off = 0;
-    e0 = DS_OK;
-    r0 = deserialize_ft_from_fd_into_rbuf(fd, header_0_off, &rb_0, &checkpoint_count_0, &checkpoint_lsn_0, &version_0, &e0);
+    r0 = deserialize_ft_from_fd_into_rbuf(fd, header_0_off, &rb_0, &checkpoint_count_0, &checkpoint_lsn_0, &version_0);
     if (r0 == 0 && checkpoint_lsn_0.lsn <= max_acceptable_lsn.lsn) {
         h0_acceptable = TRUE;
     }
     toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE;
-    e1 = DS_OK;
-    r1 = deserialize_ft_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1, &e1);
+    r1 = deserialize_ft_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1);
     if (r1 == 0 && checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) {
         h1_acceptable = TRUE;
     }
@@ -585,6 +577,9 @@ toku_deserialize_ft_from(int fd,
             r = TOKUDB_DICTIONARY_TOO_NEW;
         } else if (r0 == TOKUDB_DICTIONARY_TOO_OLD || r1 == TOKUDB_DICTIONARY_TOO_OLD) {
             r = TOKUDB_DICTIONARY_TOO_OLD;
+        } else if (r0 == TOKUDB_BAD_CHECKSUM && r1 == TOKUDB_BAD_CHECKSUM) {
+            fprintf(stderr, "Both header checksums failed.\n");
+            r = TOKUDB_BAD_CHECKSUM;
         } else if (r0 == TOKUDB_DICTIONARY_NO_HEADER || r1 == TOKUDB_DICTIONARY_NO_HEADER) {
             r = TOKUDB_DICTIONARY_NO_HEADER;
         } else {
@@ -596,13 +591,6 @@ toku_deserialize_ft_from(int fd,
         invariant(!((r0==0 && checkpoint_lsn_0.lsn > max_acceptable_lsn.lsn) &&
                     (r1==0 && checkpoint_lsn_1.lsn > max_acceptable_lsn.lsn)));
         invariant(r!=0);
-        if (e0 == DS_XSUM_FAIL && e1 == DS_XSUM_FAIL) {
-            fprintf(stderr, "Both header checksums failed.\n");
-            e = DS_XSUM_FAIL;
-        } else {
-            errno = r;
-            e = DS_ERRNO;
-        }
         goto exit;
     }
@@ -620,14 +608,14 @@ toku_deserialize_ft_from(int fd,
             version = version_1;
         }
     } else if (h0_acceptable) {
-        if (e1 == DS_XSUM_FAIL) {
+        if (r1 == TOKUDB_BAD_CHECKSUM) {
             // print something reassuring
             fprintf(stderr, "Header 2 checksum failed, but header 1 ok. Proceeding.\n");
         }
         rb = &rb_0;
         version = version_0;
     } else if (h1_acceptable) {
-        if (e0 == DS_XSUM_FAIL) {
+        if (r0 == TOKUDB_BAD_CHECKSUM) {
             // print something reassuring
             fprintf(stderr, "Header 1 checksum failed, but header 2 ok. Proceeding.\n");
         }
@@ -636,7 +624,7 @@ toku_deserialize_ft_from(int fd,
     }
     invariant(rb);
-    e = deserialize_ft_versioned(fd, rb, ft, version);
+    r = deserialize_ft_versioned(fd, rb, ft, version);
 exit:
     if (rb_0.buf) {
@@ -645,7 +633,7 @@ exit:
     if (rb_1.buf) {
         toku_free(rb_1.buf);
     }
-    return e;
+    return r;
 }
...
@@ -510,16 +510,10 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_ac
     int r;
     {
         int fd = toku_cachefile_get_fd(cf);
-        enum deserialize_error_code e = toku_deserialize_ft_from(fd, max_acceptable_lsn, &h);
-        if (e == DS_XSUM_FAIL) {
+        r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &h);
+        if (r == TOKUDB_BAD_CHECKSUM) {
             fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
             assert(false); // make absolutely sure we crash before doing anything else
-        } else if (e == DS_ERRNO) {
-            r = errno;
-        } else if (e == DS_OK) {
-            r = 0;
-        } else {
-            assert(false);
         }
     }
     if (r!=0) return r;
...
@@ -288,12 +288,6 @@ enum reactivity {
     RE_FISSIBLE
 };
-enum deserialize_error_code {
-    DS_OK = 0,
-    DS_XSUM_FAIL,
-    DS_ERRNO
-};
 #if defined(__cplusplus) || defined(__cilkplusplus)
 };
 #endif
...
@@ -98,7 +98,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
     BOOL h0_acceptable = FALSE;
     BOOL h1_acceptable = FALSE;
     int r0, r1;
-    enum deserialize_error_code e = DS_OK;
+    int r;
     {
         toku_off_t header_0_off = 0;
@@ -108,8 +108,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
                                               &rb_0,
                                               &checkpoint_count_0,
                                               &checkpoint_lsn_0,
-                                              &version_0,
-                                              &e
+                                              &version_0
                                               );
         if ((r0==0) && (checkpoint_lsn_0.lsn <= MAX_LSN.lsn)) {
             h0_acceptable = TRUE;
@@ -123,8 +122,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
                                               &rb_1,
                                               &checkpoint_count_1,
                                               &checkpoint_lsn_1,
-                                              &version_1,
-                                              &e
+                                              &version_1
                                               );
         if ((r1==0) && (checkpoint_lsn_1.lsn <= MAX_LSN.lsn)) {
             h1_acceptable = TRUE;
@@ -138,9 +136,9 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
     }
     if (h0_acceptable) {
         printf("Found dictionary header 1 with LSN %"PRIu64"\n", checkpoint_lsn_0.lsn);
-        e = deserialize_ft_versioned(fd, &rb_0, h1p, version_0);
-        if (e != DS_OK) {
+        r = deserialize_ft_versioned(fd, &rb_0, h1p, version_0);
+        if (r != 0) {
             printf("---Header Error----\n");
         }
@@ -149,8 +147,8 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
     }
     if (h1_acceptable) {
         printf("Found dictionary header 2 with LSN %"PRIu64"\n", checkpoint_lsn_1.lsn);
-        e = deserialize_ft_versioned(fd, &rb_1, h2p, version_1);
-        if (e != DS_OK) {
+        r = deserialize_ft_versioned(fd, &rb_1, h2p, version_1);
+        if (r != 0) {
             printf("---Header Error----\n");
         }
     } else {
...