Commit ee6f11b3 authored by Rusty Russell

tdb2: traverse and chainlock support.

These tests revealed more bugs with delete (rewritten) and hash enlargement.
parent 7607ced7
ccan/tdb2/lock.c
@@ -719,7 +719,8 @@ void tdb_unlock_free_list(struct tdb_context *tdb, tdb_off_t flist)
+                       flist, F_WRLCK);
 }
 
-#if 0
 /* Even if the entry isn't in this hash bucket, you'd have to lock this
  * bucket to find it. */
 static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
                      int ltype, enum tdb_lock_flags waitflag,
                      const char *func)
@@ -739,6 +740,14 @@ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
         return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
 }
 
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
+        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+        return tdb_unlock_list(tdb, h, F_WRLCK);
+}
+
+#if 0
 /* lock/unlock one hash chain, non-blocking.  This is meant to be used
    to reduce contention - it cannot guarantee how many records will be
    locked */
@@ -748,14 +757,6 @@ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
                          "tdb_chainlock_nonblock");
 }
 
-int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
-{
-        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
-        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
-        return tdb_unlock_list(tdb, h & ((1ULL << tdb->header.v.hash_bits)-1),
-                               F_WRLCK);
-}
-
 int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
 {
         return chainlock(tdb, &key, F_RDLCK, TDB_LOCK_WAIT,
......
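An illustrative sketch (not part of this commit): the chainlock API enabled above exists so a caller can make a fetch-then-store atomic against other writers. A minimal read-modify-write, assuming the public header is ccan/tdb2/tdb2.h and the value is a uint32_t counter:

        #include <ccan/tdb2/tdb2.h>     /* assumed public header name */
        #include <stdint.h>
        #include <stdlib.h>
        #include <string.h>

        static int increment_counter(struct tdb_context *tdb, TDB_DATA key)
        {
                struct tdb_data val;
                uint32_t n = 0;
                int ret;

                /* Hold the chain lock so fetch+store can't race another writer. */
                if (tdb_chainlock(tdb, key) != 0)
                        return -1;

                val = tdb_fetch(tdb, key);
                if (val.dptr) {
                        if (val.dsize == sizeof(n))
                                memcpy(&n, val.dptr, sizeof(n));
                        free(val.dptr);
                }
                n++;
                val.dptr = (unsigned char *)&n;
                val.dsize = sizeof(n);
                ret = tdb_store(tdb, key, val, TDB_REPLACE);

                tdb_chainunlock(tdb, key);
                return ret;
        }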
ccan/tdb2/private.h
@@ -263,6 +263,9 @@ int write_header(struct tdb_context *tdb);
 /* Hash random memory. */
 uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len);
 
+/* offset of hash table entry for this list/hash value */
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list);
+
 /* free.c: */
 void tdb_zone_init(struct tdb_context *tdb);
......
ccan/tdb2/tdb.c
@@ -394,7 +394,7 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
         return NULL;
 }
 
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
 {
         return tdb->header.v.hash_off
                 + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
@@ -561,16 +561,24 @@ static int hash_add(struct tdb_context *tdb,
         if (unlikely(num == len)) {
                 /* We wrapped.  Look through start of hash table. */
                 i = 0;
                 hoff = hash_off(tdb, 0);
                 len = (1ULL << tdb->header.v.hash_bits);
                 num = tdb_find_zero_off(tdb, hoff, len);
-                if (i == len) {
+                if (num == len) {
                         tdb->ecode = TDB_ERR_CORRUPT;
                         tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                  "hash_add: full hash table!\n");
                         return -1;
                 }
         }
+        if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
+                tdb->ecode = TDB_ERR_CORRUPT;
+                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                         "hash_add: overwriting hash table?\n");
+                return -1;
+        }
+
         /* FIXME: Encode extra hash bits! */
         return tdb_write_off(tdb, hash_off(tdb, i + num), off);
 }
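The i-versus-num fix above is in the wrap-around branch of a linear-probe insert. For illustration (not from this commit), the same probing scheme on a plain in-memory table, simplified to skip tdb's extra hash bits and file I/O:

        #include <stddef.h>
        #include <stdint.h>

        /* Insert `off` for hash `h`; returns the chosen bucket, or (size_t)-1
         * if the table is full.  `len` must be a power of two; 0 marks an
         * empty bucket. */
        static size_t probe_insert(uint64_t *table, size_t len, uint64_t h,
                                   uint64_t off)
        {
                size_t start = h & (len - 1), i;

                for (i = start; i < len; i++)   /* home bucket to end of table */
                        if (table[i] == 0)
                                goto found;
                for (i = 0; i < start; i++)     /* wrapped: the case fixed above */
                        if (table[i] == 0)
                                goto found;
                return (size_t)-1;              /* full table; tdb logs this as corruption */
        found:
                table[i] = off;
                return i;
        }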
@@ -582,6 +590,7 @@ static void enlarge_hash(struct tdb_context *tdb)
         tdb_len_t hlen;
         uint64_t num = 1ULL << tdb->header.v.hash_bits;
         struct tdb_used_record pad, *r;
+        unsigned int records = 0;
 
         /* FIXME: We should do this without holding locks throughout. */
         if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
@@ -624,8 +633,14 @@ again:
                         goto oldheader;
                 if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
                         goto oldheader;
+                if (off)
+                        records++;
         }
 
+        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
+                 "enlarge_hash: moved %u records from %llu buckets.\n",
+                 records, (long long)num);
+
         /* Free up old hash. */
         r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
         if (!r)
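enlarge_hash doubles the bucket count and re-adds every entry; the new `records` counter only instruments that move. As a rough in-memory sketch of the same doubling (not the commit's code, which also holds the allrecord lock and rereads each record's hash from the file), reusing probe_insert() from the sketch above:

        #include <stdlib.h>

        /* Double a table built with probe_insert(); returns the new table and
         * updates *len, or returns NULL on allocation failure. */
        static uint64_t *grow_table(uint64_t *old, size_t *len,
                                    uint64_t (*hash_of)(uint64_t off))
        {
                size_t i, newlen = *len * 2;
                uint64_t *newtable = calloc(newlen, sizeof(*newtable));

                if (!newtable)
                        return NULL;
                for (i = 0; i < *len; i++)
                        if (old[i] != 0)
                                probe_insert(newtable, newlen, hash_of(old[i]), old[i]);
                free(old);
                *len = newlen;
                return newtable;
        }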
@@ -645,6 +660,42 @@ oldheader:
         goto unlock;
 }
 
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero one.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+                                    struct tdb_data key,
+                                    uint64_t h,
+                                    int ltype,
+                                    tdb_off_t *start_lock,
+                                    tdb_len_t *num_locks,
+                                    tdb_off_t *bucket,
+                                    struct tdb_used_record *rec)
+{
+        /* Warning: this may drop the lock on *bucket! */
+        *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+        if (*num_locks == TDB_OFF_ERR)
+                return TDB_OFF_ERR;
+
+        for (*bucket = *start_lock;
+             *bucket < *start_lock + *num_locks;
+             (*bucket)++) {
+                tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+                /* Empty entry or we found it? */
+                if (off == 0 || off != TDB_OFF_ERR)
+                        return off;
+        }
+
+        /* We didn't find a zero entry?  Something went badly wrong... */
+        unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+        tdb->ecode = TDB_ERR_CORRUPT;
+        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                 "find_and_lock: expected to find an empty hash bucket!\n");
+        return TDB_OFF_ERR;
+}
+
 /* This is the core routine which searches the hashtable for an entry.
  * On error, no locks are held and TDB_OFF_ERR is returned.
  * Otherwise, *num_locks locks of type ltype from *start_lock are held.
@@ -676,27 +727,8 @@ static tdb_off_t find_and_lock(struct tdb_context *tdb,
         }
 
         /* Slow path, need to grab more locks and search. */
-        /* Warning: this may drop the lock on *bucket! */
-        *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
-        if (*num_locks == TDB_OFF_ERR)
-                return TDB_OFF_ERR;
-
-        for (*bucket = *start_lock;
-             *bucket < *start_lock + *num_locks;
-             (*bucket)++) {
-                off = entry_matches(tdb, *bucket, h, &key, rec);
-                /* Empty entry or we found it? */
-                if (off == 0 || off != TDB_OFF_ERR)
-                        return off;
-        }
-
-        /* We didn't find a zero entry?  Something went badly wrong... */
-        unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
-        tdb->ecode = TDB_ERR_CORRUPT;
-        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                 "find_and_lock: expected to find an empty hash bucket!\n");
-        return TDB_OFF_ERR;
+        return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
+                                  bucket, rec);
 }
 
 /* Returns -1 on error, 0 on OK, 1 on "expand and retry." */
@@ -710,7 +742,10 @@ static int replace_data(struct tdb_context *tdb,
         /* Allocate a new record. */
         new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
-        if (new_off == 0)
+        if (unlikely(new_off == TDB_OFF_ERR))
                 return -1;
 
+        if (unlikely(new_off == 0))
+                return 1;
+
         /* We didn't like the existing one: remove it. */
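The TDB_OFF_ERR/zero distinction above gives replace_data its three results: -1 error, 0 stored, 1 "expand and retry" (per the comment before the hunk). A self-contained sketch of that calling convention (illustrative only; the callbacks stand in for replace_data() and tdb's file-expansion step):

        /* Retry until stored: replace() returns -1/0/1 as above;
         * expand() returns 0 on success, -1 on failure. */
        static int store_with_retry(int (*replace)(void *), int (*expand)(void *),
                                    void *ctx)
        {
                for (;;) {
                        int ret = replace(ctx);
                        if (ret <= 0)
                                return ret;     /* -1 = error, 0 = done */
                        if (expand(ctx) == -1)
                                return -1;      /* ret == 1: grow the file, retry */
                }
        }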
@@ -883,7 +918,7 @@ int tdb_append(struct tdb_context *tdb,
         if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
                 enlarge_hash(tdb);
 
-        return 0;
+        return ret;
 
 fail:
         unlock_lists(tdb, start, num, F_WRLCK);
@@ -922,28 +957,38 @@ int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
         uint64_t h;
 
         h = tdb_hash(tdb, key.dptr, key.dsize);
-        off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+        start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
+        if (unlikely(start == TDB_OFF_ERR))
+                return -1;
+
+        /* FIXME: Fastpath: if next is zero, we can delete without lock,
+         * since this lock protects us. */
+        off = find_and_lock_slow(tdb, key, h, F_WRLCK,
+                                 &start, &num, &bucket, &rec);
         if (unlikely(off == TDB_OFF_ERR))
                 return -1;
 
         if (!off) {
+                /* FIXME: We could optimize not found case if it mattered, by
+                 * reading offset after first lock: if it's zero, goto here. */
                 unlock_lists(tdb, start, num, F_WRLCK);
                 tdb->ecode = TDB_ERR_NOEXIST;
                 return -1;
         }
 
+        /* Since we found the entry, we must have locked it and a zero. */
+        assert(num >= 2);
+
         /* This actually unlinks it. */
         if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
                 goto unlock_err;
 
         /* Rehash anything following. */
-        for (i = hash_off(tdb, bucket+1);
-             i != hash_off(tdb, h + num - 1);
-             i += sizeof(tdb_off_t)) {
-                tdb_off_t off2;
+        for (i = bucket+1; i != bucket + num - 1; i++) {
+                tdb_off_t hoff, off2;
                 uint64_t h2;
 
-                off2 = tdb_read_off(tdb, i);
+                hoff = hash_off(tdb, i);
+                off2 = tdb_read_off(tdb, hoff);
                 if (unlikely(off2 == TDB_OFF_ERR))
                         goto unlock_err;
@@ -954,11 +999,11 @@ int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
                 /* Maybe use a bit to indicate it is in ideal place? */
                 h2 = hash_record(tdb, off2);
 
                 /* Is it happy where it is? */
-                if (hash_off(tdb, h2) == i)
+                if (hash_off(tdb, h2) == hoff)
                         continue;
 
                 /* Remove it. */
-                if (tdb_write_off(tdb, i, 0) == -1)
+                if (tdb_write_off(tdb, hoff, 0) == -1)
                         goto unlock_err;
 
                 /* Rehash it. */
......
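Deleting from an open-addressed table can strand entries whose probe path crossed the freed bucket, which is why tdb_delete rewalks the cluster above. The same algorithm on a plain in-memory table (illustrative, not the commit's code; reuses probe_insert() from the earlier sketch and assumes the table is not full):

        /* Clear table[bucket], then re-place every following entry in the
         * cluster so no record is stranded behind the new hole. */
        static void delete_and_fix(uint64_t *table, size_t len, size_t bucket,
                                   uint64_t (*hash_of)(uint64_t off))
        {
                size_t i;

                table[bucket] = 0;
                for (i = (bucket + 1) & (len - 1); table[i] != 0;
                     i = (i + 1) & (len - 1)) {
                        uint64_t off = table[i];

                        /* Already in its ideal bucket?  Leave it alone. */
                        if ((hash_of(off) & (len - 1)) == i)
                                continue;
                        /* Remove and reinsert; it may now land in the hole. */
                        table[i] = 0;
                        probe_insert(table, len, hash_of(off), off);
                }
        }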
@@ -89,6 +89,7 @@ typedef struct tdb_data {
 struct tdb_context;
 
 /* FIXME: Make typesafe */
+typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA, void *);
 typedef void (*tdb_logfn_t)(struct tdb_context *, enum tdb_debug_level, void *priv, const char *, ...) PRINTF_ATTRIBUTE(4, 5);
 typedef uint64_t (*tdb_hashfn_t)(const void *key, size_t len, uint64_t seed,
                                  void *priv);
@@ -129,6 +130,11 @@ struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key);
 int tdb_delete(struct tdb_context *tdb, struct tdb_data key);
 int tdb_store(struct tdb_context *tdb, struct tdb_data key, struct tdb_data dbuf, int flag);
 int tdb_append(struct tdb_context *tdb, struct tdb_data key, struct tdb_data dbuf);
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key);
+int64_t tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *p);
+int64_t tdb_traverse_read(struct tdb_context *tdb,
+                          tdb_traverse_func fn, void *p);
 int tdb_close(struct tdb_context *tdb);
 int tdb_check(struct tdb_context *tdb,
               int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
......
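With the declarations above, a minimal traversal looks like this (illustrative; header name assumed as before). A nonzero return from the callback stops the walk, as implemented in traverse.c below:

        #include <ccan/tdb2/tdb2.h>     /* assumed public header name */

        static int count_cb(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
                            void *p)
        {
                unsigned int *count = p;

                (*count)++;
                return *count >= 100;   /* nonzero: stop after 100 records */
        }

        static int64_t count_some(struct tdb_context *tdb)
        {
                unsigned int count = 0;

                /* Returns the number of records visited, or -1 on error. */
                return tdb_traverse_read(tdb, count_cb, &count);
        }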
@@ -153,13 +153,6 @@ static void add_to_freetable(struct tdb_context *tdb,
         add_free_record(tdb, eoff, sizeof(struct tdb_used_record) + elen);
 }
 
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
-{
-        return tdb->header.v.hash_off
-                + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
-                   * sizeof(tdb_off_t));
-}
-
 static void add_to_hashtable(struct tdb_context *tdb,
                              tdb_off_t eoff,
                              struct tdb_data key)
......
ccan/tdb2/test/run-missing-entries.c (new file):

/* Another test revealed that we lost an entry.  This reproduces it. */
#include <ccan/tdb2/tdb.c>
#include <ccan/tdb2/free.c>
#include <ccan/tdb2/lock.c>
#include <ccan/tdb2/io.c>
#include <ccan/tdb2/check.c>
#include <ccan/tdb2/traverse.c>
#include <ccan/tap/tap.h>
#include "logging.h"
#define NUM_RECORDS 1189
/* We use the same seed which we saw this failure on. */
static uint64_t failhash(const void *key, size_t len, uint64_t seed, void *p)
{
seed = 699537674708983027ULL;
return hash64_stable((const unsigned char *)key, len, seed);
}
int main(int argc, char *argv[])
{
int i;
struct tdb_context *tdb;
struct tdb_data key = { (unsigned char *)&i, sizeof(i) };
struct tdb_data data = { (unsigned char *)&i, sizeof(i) };
union tdb_attribute hattr = { .hash = { .base = { TDB_ATTRIBUTE_HASH },
.hash_fn = failhash } };
hattr.base.next = &tap_log_attr;
plan_tests(1 + 2 * NUM_RECORDS + 1);
tdb = tdb_open("run-missing-entries.tdb", TDB_INTERNAL,
O_RDWR|O_CREAT|O_TRUNC, 0600, &hattr);
ok1(tdb);
if (tdb) {
for (i = 0; i < NUM_RECORDS; i++) {
ok1(tdb_store(tdb, key, data, TDB_REPLACE) == 0);
ok1(tdb_check(tdb, NULL, NULL) == 0);
}
tdb_close(tdb);
}
ok1(tap_log_messages == 0);
return exit_status();
}
ccan/tdb2/traverse.c (new file):

/*
Trivial Database 2: traverse function.
Copyright (C) Rusty Russell 2010
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
static int64_t traverse(struct tdb_context *tdb, int ltype,
tdb_traverse_func fn, void *p)
{
uint64_t i, num, count = 0;
tdb_off_t off, prev_bucket;
struct tdb_used_record rec;
struct tdb_data k, d;
bool finish = false;
/* FIXME: Do we need to start at 0? */
prev_bucket = tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT);
if (prev_bucket != 0)
return -1;
num = (1ULL << tdb->header.v.hash_bits);
for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
i != num && !finish;
i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != i)
goto fail;
off = tdb_read_off(tdb, hash_off(tdb, i));
if (off == TDB_OFF_ERR) {
tdb_unlock_list(tdb, i, ltype);
goto fail;
}
/* This race can happen, but look again. */
if (off == 0) {
tdb_unlock_list(tdb, i, ltype);
continue;
}
/* Drop previous lock. */
tdb_unlock_list(tdb, prev_bucket, ltype);
prev_bucket = i;
if (tdb_read_convert(tdb, off, &rec, sizeof(rec)) != 0)
goto fail;
k.dsize = rec_key_length(&rec);
d.dsize = rec_data_length(&rec);
if (ltype == F_RDLCK) {
/* Read traverses can keep the lock. */
k.dptr = (void *)tdb_access_read(tdb,
off + sizeof(rec),
k.dsize + d.dsize,
false);
} else {
k.dptr = tdb_alloc_read(tdb, off + sizeof(rec),
k.dsize + d.dsize);
}
if (!k.dptr)
goto fail;
d.dptr = k.dptr + k.dsize;
count++;
if (ltype == F_WRLCK) {
/* Drop lock before calling out. */
tdb_unlock_list(tdb, i, ltype);
}
if (fn && fn(tdb, k, d, p))
finish = true;
if (ltype == F_WRLCK) {
free(k.dptr);
/* Regain lock. FIXME: Is this necessary? */
if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != i)
return -1;
/* This makes deleting under ourselves a bit nicer. */
if (tdb_read_off(tdb, hash_off(tdb, i)) == off)
i++;
} else {
tdb_access_release(tdb, k.dptr);
i++;
}
}
/* Drop final lock. */
tdb_unlock_list(tdb, prev_bucket, ltype);
return count;
fail:
tdb_unlock_list(tdb, prev_bucket, ltype);
return -1;
}
int64_t tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *p)
{
return traverse(tdb, F_WRLCK, fn, p);
}
int64_t tdb_traverse_read(struct tdb_context *tdb,
tdb_traverse_func fn, void *p)
{
int64_t ret;
bool was_ro = tdb->read_only;
tdb->read_only = true;
ret = traverse(tdb, F_RDLCK, fn, p);
tdb->read_only = was_ro;
return ret;
}
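One property of the bucket re-check above (tdb_read_off(...) == off): a write traverse tolerates the callback deleting the record it is visiting, since the traverse only advances when the bucket still holds the same record. An illustrative use (not from this commit), emptying a database via traversal:

        static int delete_cb(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
                             void *p)
        {
                /* Deleting under the traverse is handled by the bucket
                 * re-read before `i` is advanced, per the code above. */
                return tdb_delete(tdb, key) != 0;       /* stop on error */
        }

        static int64_t delete_all(struct tdb_context *tdb)
        {
                return tdb_traverse(tdb, delete_cb, NULL);
        }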