Commit a6aae744 authored by John Esmet

Add support for fewer than 4 dbs for perf iibench

parent 9a6ba1aa
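
The hunks below show the core of the change in the iibench perf test: the hard-coded `iibench_num_dbs = 4` constant is dropped and the multi-put DBT arrays are sized from the runtime `num_DBs` setting (`arg->cli->num_DBs`). A minimal standalone sketch of that pattern, with hypothetical names rather than the benchmark code itself:

```c
/* Minimal standalone sketch (hypothetical, not the benchmark itself) of the
 * pattern this commit adopts: size the per-index DBT arrays from a runtime
 * value rather than a compile-time constant, so fewer than 4 DBs work. */
#include <stdio.h>
#include <string.h>

struct fake_dbt { void *data; unsigned int size; unsigned int flags; };

static void prepare_multi_put(int num_dbs) {
    /* C99 variable-length arrays, as iibench_put_op now does with
     * arg->cli->num_DBs instead of the removed iibench_num_dbs constant */
    struct fake_dbt mult_key_dbt[num_dbs];
    struct fake_dbt mult_val_dbt[num_dbs];
    unsigned int mult_put_flags[num_dbs];
    memset(mult_key_dbt, 0, sizeof(mult_key_dbt));
    memset(mult_val_dbt, 0, sizeof(mult_val_dbt));
    memset(mult_put_flags, 0, sizeof(mult_put_flags));
    printf("prepared %d DBT slots\n", num_dbs);
}

int main(void) {
    prepare_multi_put(2);  /* e.g. pk plus a single secondary index */
    return 0;
}
```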
......@@ -108,7 +108,6 @@ PATENT RIGHTS GRANT:
// The secondary keys have the primary key appended to them.
//
static const int iibench_num_dbs = 4;
static const size_t iibench_secondary_key_size = 16;
struct iibench_row {
......@@ -197,7 +196,7 @@ static int iibench_get_db_idx(DB *db) {
static void iibench_rangequery_cb(DB *db, const DBT *key, const DBT *val, void *extra) {
invariant_null(extra);
int db_idx = iibench_get_db_idx(db);
const int db_idx = iibench_get_db_idx(db);
if (db_idx == 0) {
struct iibench_row row;
iibench_parse_row(key, val, &row);
......@@ -214,11 +213,12 @@ struct iibench_put_op_extra {
};
static int UU() iibench_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void *stats_extra) {
const int num_dbs = arg->cli->num_DBs;
DB **dbs = arg->dbp;
DB_ENV *env = arg->env;
DBT mult_key_dbt[iibench_num_dbs];
DBT mult_val_dbt[iibench_num_dbs];
uint32_t mult_put_flags[iibench_num_dbs];
DBT mult_key_dbt[num_dbs];
DBT mult_val_dbt[num_dbs];
uint32_t mult_put_flags[num_dbs];
memset(mult_key_dbt, 0, sizeof(mult_key_dbt));
memset(mult_val_dbt, 0, sizeof(mult_val_dbt));
......@@ -227,7 +227,7 @@ static int UU() iibench_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void
mult_put_flags[0] = get_put_flags(arg->cli) |
// If the table was already created, don't check for uniqueness.
(arg->cli->num_elements > 0 ? 0 : DB_NOOVERWRITE);
for (int i = 1; i < iibench_num_dbs; i++) {
for (int i = 1; i < num_dbs; i++) {
mult_key_dbt[i].flags = DB_DBT_REALLOC;
mult_put_flags[i] = get_put_flags(arg->cli);
}
......@@ -256,7 +256,7 @@ static int UU() iibench_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void
txn,
&mult_key_dbt[0], // source db key
&mult_val_dbt[0], // source db value
iibench_num_dbs, // total number of dbs
num_dbs, // total number of dbs
dbs, // array of dbs
mult_key_dbt, // array of keys
mult_val_dbt, // array of values
......@@ -273,7 +273,7 @@ static int UU() iibench_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void
}
cleanup:
for (int i = 1; i < iibench_num_dbs; i++) {
for (int i = 1; i < num_dbs; i++) {
toku_free(mult_key_dbt[i].data);
}
return r;
......@@ -295,8 +295,7 @@ static int iibench_generate_row_for_put(DB *dest_db, DB *src_db, DBT *dest_key,
// so it has to be greater than zero (which would be the pk). Then
// grab the appropriate secondary key from the source val, which is
// an array of the 3 columns, so we have to subtract 1 from the index.
int db_idx = iibench_get_db_idx(dest_db);
invariant(db_idx > 0 && db_idx < 4);
const int db_idx = iibench_get_db_idx(dest_db);
int64_t *CAST_FROM_VOIDP(columns, src_val->data);
int64_t secondary_key = columns[db_idx - 1];
......@@ -326,7 +325,7 @@ static DB *iibench_set_descriptor_after_db_opens(DB_ENV *env, DB *db, int idx, r
}
static int iibench_compare_keys(DB *db, const DBT *a, const DBT *b) {
int db_idx = iibench_get_db_idx(db);
const int db_idx = iibench_get_db_idx(db);
if (db_idx == 0) {
invariant(a->size == 8);
invariant(b->size == 8);
......@@ -409,20 +408,21 @@ static int iibench_rangequery_op(DB_TXN *txn, ARG arg, void *operation_extra, vo
}
static int iibench_fill_tables(DB_ENV *env, DB **dbs, struct cli_args *cli_args, bool UU(fill_with_zeroes)) {
const int num_dbs = cli_args->num_DBs;
int r = 0;
DB_TXN *txn;
r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
DB_LOADER *loader;
uint32_t db_flags[4];
uint32_t dbt_flags[4];
for (int i = 0; i < 4; i++) {
uint32_t db_flags[num_dbs];
uint32_t dbt_flags[num_dbs];
for (int i = 0; i < num_dbs; i++) {
db_flags[i] = DB_PRELOCKED_WRITE;
dbt_flags[i] = DB_DBT_REALLOC;
}
r = env->create_loader(env, txn, &loader, dbs[0], 4, dbs, db_flags, dbt_flags, 0); CKERR(r);
r = env->create_loader(env, txn, &loader, dbs[0], num_dbs, dbs, db_flags, dbt_flags, 0); CKERR(r);
for (int i = 0; i < cli_args->num_elements; i++) {
DBT key, val;
uint64_t pk = i;
......@@ -433,6 +433,9 @@ static int iibench_fill_tables(DB_ENV *env, DB **dbs, struct cli_args *cli_args,
dbt_init(&key, keybuf, sizeof keybuf);
dbt_init(&val, valbuf, sizeof valbuf);
r = loader->put(loader, &key, &val); CKERR(r);
if (verbose && i > 0 && i % 10000 == 0) {
report_overall_fill_table_progress(cli_args, 10000);
}
}
r = loader->close(loader); CKERR(r);
......@@ -473,12 +476,12 @@ int test_main(int argc, char *const argv[]) {
args.num_elements = 0; // want to start with empty DBs
// Puts per transaction is configurable. It defaults to 1k.
args.txn_size = 1000;
// Default to one writer, no readers.
// Default to one writer on 4 indexes (pk + 3 secondaries), no readers.
args.num_DBs = 4;
args.num_put_threads = 1;
args.num_ptquery_threads = 0;
parse_stress_test_args(argc, argv, &args);
// The index count and schema are not configurable. Silently ignore whatever was passed in.
args.num_DBs = 4;
// The schema is not configurable. Silently ignore whatever was passed in.
args.key_size = 8;
args.val_size = 32;
// when there are multiple threads, it's valid for two of them to
......
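
The comments in the hunks above describe the iibench row layout: an 8-byte integer primary key, a value carrying the secondary columns as an array of int64_t, and 16-byte secondary keys with the primary key appended (iibench_secondary_key_size). A hypothetical sketch of that key construction, under those assumptions:

```c
/* Hypothetical sketch of the secondary-key layout implied by the comments
 * above: an 8-byte secondary column value with the 8-byte primary key
 * appended, matching iibench_secondary_key_size = 16. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void build_secondary_key(char out[16], int64_t secondary_column, int64_t pk) {
    memcpy(out, &secondary_column, sizeof(secondary_column));
    memcpy(out + sizeof(secondary_column), &pk, sizeof(pk));
}

int main(void) {
    char key[16];
    build_secondary_key(key, /* secondary column */ 1234, /* pk */ 42);
    printf("built a %d-byte secondary key\n", (int) sizeof(key));
    return 0;
}
```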
......@@ -164,7 +164,7 @@ test_main(int argc, char *const argv[]) {
// we expect to get lock_notgranted op failures, and we
// don't want the overhead of fsync on small txns
args.crash_on_operation_failure = false;
args.nosync = true;
args.env_args.sync_period = 100; // speed up the test by not fsyncing very often
stress_test_main(&args);
return 0;
}
......@@ -153,6 +153,7 @@ struct env_args {
int checkpointing_period;
int cleaner_period;
int cleaner_iterations;
int sync_period;
uint64_t lk_max_memory;
uint64_t cachetable_size;
uint32_t num_bucket_mutexes;
......@@ -202,7 +203,7 @@ struct cli_args {
bool blackhole; // all message injects are no-ops. helps measure txn/logging/locktree overhead.
bool nolocktree; // use this flag to avoid the locktree on insertions
bool unique_checks; // use uniqueness checking during insert. makes it slow.
bool nosync; // do not fsync on txn commit. useful for testing in memory performance.
uint32_t sync_period; // background log fsync period
bool nolog; // do not log. useful for testing in memory performance.
bool nocrashstatus; // do not print engine status upon crash
bool prelock_updates; // update threads perform serial updates on a prelocked range
......@@ -512,7 +513,7 @@ static int get_put_flags(struct cli_args *args) {
static int get_commit_flags(struct cli_args *args) {
int flags = 0;
flags |= args->nosync ? DB_TXN_NOSYNC : 0;
flags |= args->env_args.sync_period > 0 ? DB_TXN_NOSYNC : 0;
return flags;
}
......@@ -1903,6 +1904,7 @@ static int create_tables(DB_ENV **env_res, DB **db_res, int num_DBs,
r = env->checkpointing_set_period(env, env_args.checkpointing_period); CKERR(r);
r = env->cleaner_set_period(env, env_args.cleaner_period); CKERR(r);
r = env->cleaner_set_iterations(env, env_args.cleaner_iterations); CKERR(r);
env->change_fsync_log_period(env, env_args.sync_period);
*env_res = env;
for (int i = 0; i < num_DBs; i++) {
......@@ -1996,9 +1998,8 @@ static void fill_single_table(DB_ENV *env, DB *db, struct cli_args *args, bool f
report_overall_fill_table_progress(args, puts_per_txn);
}
// begin a new txn if we're not using the loader,
// don't bother fsyncing to disk.
if (loader == nullptr) {
r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
r = txn->commit(txn, 0); CKERR(r);
r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
}
}
......@@ -2101,6 +2102,7 @@ static int open_tables(DB_ENV **env_res, DB **db_res, int num_DBs,
r = env->checkpointing_set_period(env, env_args.checkpointing_period); CKERR(r);
r = env->cleaner_set_period(env, env_args.cleaner_period); CKERR(r);
r = env->cleaner_set_iterations(env, env_args.cleaner_iterations); CKERR(r);
env->change_fsync_log_period(env, env_args.sync_period);
*env_res = env;
for (int i = 0; i < num_DBs; i++) {
......@@ -2129,6 +2131,7 @@ static const struct env_args DEFAULT_ENV_ARGS = {
.checkpointing_period = 10,
.cleaner_period = 1,
.cleaner_iterations = 1,
.sync_period = 0,
.lk_max_memory = 1L * 1024 * 1024 * 1024,
.cachetable_size = 300000,
.num_bucket_mutexes = 1024,
......@@ -2145,6 +2148,7 @@ static const struct env_args DEFAULT_PERF_ENV_ARGS = {
.checkpointing_period = 60,
.cleaner_period = 1,
.cleaner_iterations = 5,
.sync_period = 0,
.lk_max_memory = 1L * 1024 * 1024 * 1024,
.cachetable_size = 1<<30,
.num_bucket_mutexes = 1024 * 1024,
......@@ -2188,7 +2192,7 @@ static struct cli_args UU() get_default_args(void) {
.blackhole = false,
.nolocktree = false,
.unique_checks = false,
.nosync = false,
.sync_period = 0,
.nolog = false,
.nocrashstatus = false,
.prelock_updates = false,
......@@ -2539,6 +2543,7 @@ static inline void parse_stress_test_args (int argc, char *const argv[], struct
INT32_ARG_NONNEG("--checkpointing_period", env_args.checkpointing_period, "s"),
INT32_ARG_NONNEG("--cleaner_period", env_args.cleaner_period, "s"),
INT32_ARG_NONNEG("--cleaner_iterations", env_args.cleaner_iterations, ""),
INT32_ARG_NONNEG("--sync_period", env_args.sync_period, "ms"),
INT32_ARG_NONNEG("--update_broadcast_period", update_broadcast_period_ms, "ms"),
INT32_ARG_NONNEG("--num_ptquery_threads", num_ptquery_threads, " threads"),
INT32_ARG_NONNEG("--num_put_threads", num_put_threads, " threads"),
......@@ -2575,7 +2580,6 @@ static inline void parse_stress_test_args (int argc, char *const argv[], struct
BOOL_ARG("blackhole", blackhole),
BOOL_ARG("nolocktree", nolocktree),
BOOL_ARG("unique_checks", unique_checks),
BOOL_ARG("nosync", nosync),
BOOL_ARG("nolog", nolog),
BOOL_ARG("nocrashstatus", nocrashstatus),
BOOL_ARG("prelock_updates", prelock_updates),
......
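
In the stress-test helpers, the boolean `nosync` option is replaced by a `sync_period` in milliseconds: the environment is asked to fsync its log on a background period via `change_fsync_log_period()`, and any nonzero period lets individual commits skip their own fsync. A rough standalone sketch of that commit-flag logic, using a placeholder flag value and assuming the background fsync behaves as the framework uses it:

```c
/* Rough sketch of the new durability knob: with a background log-fsync period
 * (env->change_fsync_log_period in the real code), per-commit fsync is
 * unnecessary, so get_commit_flags() turns on DB_TXN_NOSYNC whenever the
 * period is nonzero. */
#include <stdint.h>
#include <stdio.h>

#define DB_TXN_NOSYNC 0x1  /* placeholder, not the real db.h value */

static int get_commit_flags_sketch(uint32_t sync_period_ms) {
    int flags = 0;
    /* replaces the old boolean nosync: any nonzero background sync period
     * means commits do not need to fsync individually */
    flags |= sync_period_ms > 0 ? DB_TXN_NOSYNC : 0;
    return flags;
}

int main(void) {
    printf("flags with 100 ms period: 0x%x\n", get_commit_flags_sketch(100));
    printf("flags with period 0:      0x%x\n", get_commit_flags_sketch(0));
    return 0;
}
```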