Commit e2e0fd9e authored by Rich Prohaska, committed by Yoni Fogel

#3520 lock tree test coverage refs[t:3520]

git-svn-id: file:///svn/toku/tokudb@42986 c7de825b-a66e-492c-adef-691d508d4ae1
parent 85da482b
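Summary: the hunks below switch the lock tree hash tables from toku_malloc, which can return NULL, to toku_xmalloc, so the ENOMEM recovery branches become dead code and are deleted; that in turn removes unreachable lines for the test-coverage work tracked in #3520. A minimal sketch of the assumed toku_xmalloc contract (an illustration, not the actual TokuDB implementation):

    #include <stdlib.h>
    #include <assert.h>

    /* Hedged sketch: toku_xmalloc is assumed to wrap malloc and treat
     * allocation failure as fatal, so callers never see NULL and no
     * longer need an ENOMEM branch. */
    static void *toku_xmalloc_sketch(size_t size) {
        void *p = malloc(size);
        assert(p != NULL);   /* abort on out-of-memory instead of returning NULL */
        return p;
    }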
@@ -33,14 +33,10 @@ int toku_idlth_create(toku_idlth** pidlth) {
     int r = ENOSYS;
     assert(pidlth);
     toku_idlth* tmp = NULL;
-    tmp = (toku_idlth*) toku_malloc(sizeof(*tmp));
-    if (!tmp) { r = ENOMEM; goto cleanup; }
+    tmp = (toku_idlth*) toku_xmalloc(sizeof(*tmp));
     memset(tmp, 0, sizeof(*tmp));
     tmp->num_buckets = __toku_idlth_init_size;
-    tmp->buckets = (toku_idlth_elt*)
-        toku_malloc(tmp->num_buckets * sizeof(*tmp->buckets));
-    if (!tmp->buckets) { r = ENOMEM; goto cleanup; }
+    tmp->buckets = (toku_idlth_elt*) toku_xmalloc(tmp->num_buckets * sizeof(*tmp->buckets));
     memset(tmp->buckets, 0, tmp->num_buckets * sizeof(*tmp->buckets));
     toku__invalidate_scan(tmp);
     tmp->iter_head.next_in_iteration = &tmp->iter_head;
@@ -48,13 +44,6 @@ int toku_idlth_create(toku_idlth** pidlth) {
     *pidlth = tmp;
     r = 0;
 cleanup:
-    if (r != 0) {
-        if (tmp) {
-            if (tmp->buckets) { toku_free(tmp->buckets); }
-            toku_free(tmp);
-        }
-    }
     return r;
 }
@@ -129,8 +118,7 @@ int toku_idlth_insert(toku_idlth* idlth, DICTIONARY_ID dict_id) {
     uint32_t index = toku__idlth_hash(idlth, dict_id);
     /* Allocate a new one. */
-    toku_idlth_elt* element = (toku_idlth_elt*) toku_malloc(sizeof(*element));
-    if (!element) { r = ENOMEM; goto cleanup; }
+    toku_idlth_elt* element = (toku_idlth_elt*) toku_xmalloc(sizeof(*element));
     memset(element, 0, sizeof(*element));
     element->value.dict_id = dict_id;
@@ -144,7 +132,6 @@ int toku_idlth_insert(toku_idlth* idlth, DICTIONARY_ID dict_id) {
     idlth->num_keys++;
     r = 0;
 cleanup:
     return r;
 }
@@ -32,14 +32,10 @@ int toku_rth_create(toku_rth** prth) {
     int r = ENOSYS;
     assert(prth);
     toku_rth* tmp = NULL;
-    tmp = (toku_rth*) toku_malloc(sizeof(*tmp));
-    if (!tmp) { r = ENOMEM; goto cleanup; }
+    tmp = (toku_rth*) toku_xmalloc(sizeof(*tmp));
     memset(tmp, 0, sizeof(*tmp));
     tmp->num_buckets = __toku_rth_init_size;
-    tmp->buckets = (toku_rth_elt*)
-        toku_malloc(tmp->num_buckets * sizeof(*tmp->buckets));
-    if (!tmp->buckets) { r = ENOMEM; goto cleanup; }
+    tmp->buckets = (toku_rth_elt*) toku_xmalloc(tmp->num_buckets * sizeof(*tmp->buckets));
     memset(tmp->buckets, 0, tmp->num_buckets * sizeof(*tmp->buckets));
     toku__invalidate_scan(tmp);
     tmp->iter_head.next_in_iteration = &tmp->iter_head;
@@ -47,13 +43,6 @@ int toku_rth_create(toku_rth** prth) {
     *prth = tmp;
     r = 0;
 cleanup:
-    if (r != 0) {
-        if (tmp) {
-            if (tmp->buckets) { toku_free(tmp->buckets); }
-            toku_free(tmp);
-        }
-    }
     return r;
 }
@@ -128,8 +117,7 @@ int toku_rth_insert(toku_rth* rth, TXNID key) {
     uint32_t index = toku__rth_hash(rth, key);
     /* Allocate a new one. */
-    toku_rth_elt* element = (toku_rth_elt*) toku_malloc(sizeof(*element));
-    if (!element) { r = ENOMEM; goto cleanup; }
+    toku_rth_elt* element = (toku_rth_elt*) toku_xmalloc(sizeof(*element));
     memset(element, 0, sizeof(*element));
     element->value.hash_key = key;
     element->next_in_iteration = rth->iter_head.next_in_iteration;
@@ -142,7 +130,6 @@ int toku_rth_insert(toku_rth* rth, TXNID key) {
     rth->num_keys++;
     r = 0;
 cleanup:
     return r;
 }
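For reference, a caller-side sketch under the same assumptions: the create and insert functions still return an int and keep their cleanup labels, but after this change the allocation paths can no longer report ENOMEM. The surrounding harness, TXNID value, and function name example_caller are illustrative and not taken from the TokuDB test suite; the sketch assumes the lock tree headers declare toku_rth, TXNID, toku_rth_create, and toku_rth_insert as shown in the diff above.

    #include <assert.h>

    /* Illustrative caller, assuming the post-patch behavior above. */
    static void example_caller(void) {
        toku_rth *rth = NULL;
        int r = toku_rth_create(&rth);  /* an allocation failure now aborts inside toku_xmalloc */
        assert(r == 0);
        TXNID txnid = 42;               /* hypothetical transaction id */
        r = toku_rth_insert(rth, txnid);
        assert(r == 0);
    }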