Commit 44ed167d authored by Philipp Reisner

drbd: rcu_read_lock() and rcu_dereference() for tconn->net_conf

Remove the get_net_conf()/put_net_conf() calls from the readers of tconn->net_conf; access the pointer under rcu_read_lock()/rcu_read_unlock() via rcu_dereference() instead.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent b032b6fa
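Every hunk in this commit follows the same reader-side conversion: instead of pinning net_conf with get_net_conf()/put_net_conf(), the pointer is sampled once under rcu_read_lock() with rcu_dereference(), the fields that are needed are copied into locals, and the read-side critical section is left before the copied values are used (so no blocking call happens inside it). Below is a minimal before/after sketch of that pattern, modelled on the drbd_get_max_buffers() hunk further down; the helper names example_old()/example_new() are illustrative only, the field and fallback value are taken from that hunk:

/* Old style: pin net_conf via the tconn->net_cnt reference count. */
static int example_old(struct drbd_tconn *tconn)
{
	int mxb = 1000000;	/* arbitrary fallback while unconfigured */

	if (get_net_conf(tconn)) {		/* takes a reference */
		mxb = tconn->net_conf->max_buffers;
		put_net_conf(tconn);		/* drops it again */
	}
	return mxb;
}

/*
 * New style: sample the RCU-protected pointer, copy what is needed,
 * and leave the read-side critical section before using the values.
 */
static int example_new(struct drbd_tconn *tconn)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	return mxb;
}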
@@ -832,7 +832,7 @@ struct drbd_tconn { /* is a resource from the config file */
struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned long flags;
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
struct net_conf *net_conf; /* content protected by rcu */
atomic_t net_cnt; /* Users of net_conf */
wait_queue_head_t net_cnt_wait;
wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
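The new comment only documents the reader side that this patch converts; the net_cnt machinery kept just below is still in place at this point. For an RCU-protected pointer like this one, an updater is expected to publish a replacement with rcu_assign_pointer() and free the old struct only after a grace period. That part is not in this commit and is presumably handled elsewhere in the series; the sketch below is the generic update-side idiom, with the helper name and the surrounding locking assumed rather than taken from DRBD:

/*
 * Generic RCU update-side sketch for a pointer such as tconn->net_conf.
 * Assumes the caller already holds whatever mutex serializes updaters.
 */
static void example_replace_net_conf(struct drbd_tconn *tconn,
				     struct net_conf *new_conf)
{
	struct net_conf *old_conf = tconn->net_conf;	/* safe: updaters are serialized */

	rcu_assign_pointer(tconn->net_conf, new_conf);	/* publish the new config */
	synchronize_rcu();	/* wait until no rcu_read_lock() reader can still see old_conf */
	kfree(old_conf);	/* now the old config can be freed */
}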
@@ -2059,11 +2059,14 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
int mxb = 1000000; /* arbitrary limit on open requests */
if (get_net_conf(mdev->tconn)) {
mxb = mdev->tconn->net_conf->max_buffers;
put_net_conf(mdev->tconn);
}
struct net_conf *nc;
int mxb;
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
rcu_read_unlock();
return mxb;
}
......
@@ -843,15 +843,19 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
int size;
const int apv = mdev->tconn->agreed_pro_version;
enum drbd_packet cmd;
struct net_conf *nc;
sock = &mdev->tconn->data;
p = drbd_prepare_command(mdev, sock);
if (!p)
return -EIO;
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
+ strlen(mdev->tconn->net_conf->verify_alg) + 1
+ strlen(nc->verify_alg) + 1
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
@@ -876,9 +880,10 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
}
if (apv >= 88)
strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
strcpy(p->verify_alg, nc->verify_alg);
if (apv >= 89)
strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);
strcpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock();
return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}
@@ -887,36 +892,44 @@ int drbd_send_protocol(struct drbd_tconn *tconn)
{
struct drbd_socket *sock;
struct p_protocol *p;
struct net_conf *nc;
int size, cf;
if (tconn->net_conf->dry_run && tconn->agreed_pro_version < 92) {
conn_err(tconn, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
sock = &tconn->data;
p = conn_prepare_command(tconn, sock);
if (!p)
return -EIO;
rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
if (nc->dry_run && tconn->agreed_pro_version < 92) {
rcu_read_unlock();
mutex_unlock(&sock->mutex);
conn_err(tconn, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
size = sizeof(*p);
if (tconn->agreed_pro_version >= 87)
size += strlen(tconn->net_conf->integrity_alg) + 1;
size += strlen(nc->integrity_alg) + 1;
p->protocol = cpu_to_be32(tconn->net_conf->wire_protocol);
p->after_sb_0p = cpu_to_be32(tconn->net_conf->after_sb_0p);
p->after_sb_1p = cpu_to_be32(tconn->net_conf->after_sb_1p);
p->after_sb_2p = cpu_to_be32(tconn->net_conf->after_sb_2p);
p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);
p->protocol = cpu_to_be32(nc->wire_protocol);
p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
p->two_primaries = cpu_to_be32(nc->two_primaries);
cf = 0;
if (tconn->net_conf->want_lose)
if (nc->want_lose)
cf |= CF_WANT_LOSE;
if (tconn->net_conf->dry_run)
if (nc->dry_run)
cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
if (tconn->agreed_pro_version >= 87)
strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);
strcpy(p->integrity_alg, nc->integrity_alg);
rcu_read_unlock();
return conn_send_command(tconn, sock, P_PROTOCOL, size, NULL, 0);
}
@@ -940,7 +953,9 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
rcu_read_lock();
uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->want_lose ? 1 : 0;
rcu_read_unlock();
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
@@ -1136,12 +1151,14 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
unsigned long rl;
unsigned len;
unsigned toggle;
int bits;
int bits, use_rle;
/* may we use this feature? */
if ((mdev->tconn->net_conf->use_rle == 0) ||
(mdev->tconn->agreed_pro_version < 90))
return 0;
rcu_read_lock();
use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
rcu_read_unlock();
if (!use_rle || mdev->tconn->agreed_pro_version < 90)
return 0;
if (c->bit_offset >= c->bm_bits)
return 0; /* nothing to do. */
@@ -1812,7 +1829,9 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (sock == tconn->data.socket) {
tconn->ko_count = tconn->net_conf->ko_count;
rcu_read_lock();
tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
rcu_read_unlock();
drbd_update_congested(tconn);
}
do {
@@ -3235,15 +3254,18 @@ const char *cmdname(enum drbd_packet cmd)
*/
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
struct net_conf *net_conf = mdev->tconn->net_conf;
struct net_conf *nc;
DEFINE_WAIT(wait);
long timeout;
if (!net_conf)
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
if (!nc) {
rcu_read_unlock();
return -ETIMEDOUT;
timeout = MAX_SCHEDULE_TIMEOUT;
if (net_conf->ko_count)
timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;
}
timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
rcu_read_unlock();
/* Indicate to wake up mdev->misc_wait on progress. */
i->waiting = true;
......
[The diff for one more file in this commit is collapsed and not shown here.]
@@ -197,6 +197,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
struct net_conf *nc;
char wp;
static char write_ordering_chars[] = {
[WO_none] = 'n',
@@ -240,6 +242,10 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
mdev->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
rcu_read_unlock();
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
@@ -249,8 +255,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
(mdev->tconn->net_conf == NULL ? ' ' :
(mdev->tconn->net_conf->wire_protocol - DRBD_PROT_A+'A')),
wp,
drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
......
[The diff for one more file in this commit is collapsed and not shown here.]
@@ -323,6 +323,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
struct drbd_conf *mdev = req->w.mdev;
struct net_conf *nc;
int p, rv = 0;
if (m)
@@ -344,7 +345,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
req->rq_state |= RQ_NET_PENDING;
p = mdev->tconn->net_conf->wire_protocol;
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
@@ -474,7 +478,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_queue_work(&mdev->tconn->data.work, &req->w);
/* close the epoch, in case it outgrew the limit */
if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock();
if (mdev->tconn->newest_tle->n_writes >= p)
queue_barrier(mdev);
break;
@@ -729,6 +737,7 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
const sector_t sector = bio->bi_sector;
struct drbd_tl_epoch *b = NULL;
struct drbd_request *req;
struct net_conf *nc;
int local, remote, send_oos = 0;
int err;
int ret = 0;
@@ -935,17 +944,19 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
_req_mod(req, QUEUE_FOR_SEND_OOS);
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
if (remote &&
mdev->tconn->net_conf->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
int congested = 0;
if (mdev->tconn->net_conf->cong_fill &&
atomic_read(&mdev->ap_in_flight) >= mdev->tconn->net_conf->cong_fill) {
if (nc->cong_fill &&
atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
congested = 1;
}
if (mdev->act_log->used >= mdev->tconn->net_conf->cong_extents) {
if (mdev->act_log->used >= nc->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
congested = 1;
}
@@ -953,12 +964,13 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
if (congested) {
queue_barrier(mdev); /* last barrier, after mirrored writes */
if (mdev->tconn->net_conf->on_congestion == OC_PULL_AHEAD)
if (nc->on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
else /*mdev->tconn->net_conf->on_congestion == OC_DISCONNECT */
else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
}
}
rcu_read_unlock();
spin_unlock_irq(&mdev->tconn->req_lock);
kfree(b); /* if someone else has beaten us to it... */
@@ -1058,12 +1070,14 @@ void request_timer_fn(unsigned long data)
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_request *req; /* oldest request */
struct list_head *le;
unsigned long et = 0; /* effective timeout = ko_count * timeout */
struct net_conf *nc;
unsigned long et; /* effective timeout = ko_count * timeout */
rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
et = nc ? nc->timeout * HZ/10 * nc->ko_count : 0;
rcu_read_unlock();
if (get_net_conf(tconn)) {
et = tconn->net_conf->timeout*HZ/10 * tconn->net_conf->ko_count;
put_net_conf(tconn);
}
if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
return; /* Recurring timer stopped */
......
@@ -482,6 +482,7 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
enum drbd_fencing_p fp;
enum drbd_state_rv rv = SS_SUCCESS;
struct net_conf *nc;
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
@@ -489,14 +490,15 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
put_ldev(mdev);
}
if (get_net_conf(mdev->tconn)) {
if (!mdev->tconn->net_conf->two_primaries && ns.role == R_PRIMARY) {
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
if (nc) {
if (!nc->two_primaries && ns.role == R_PRIMARY) {
if (ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
rv = SS_O_VOL_PEER_PRI;
}
put_net_conf(mdev->tconn);
}
}
if (rv <= 0)
@@ -531,7 +533,7 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
rv = SS_CONNECTED_OUTDATES;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(mdev->tconn->net_conf->verify_alg[0] == 0))
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
@@ -541,6 +543,8 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
rcu_read_unlock();
return rv;
}
......
@@ -1619,10 +1619,16 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* detect connection loss, then waiting for a ping
* response (implicit in drbd_resync_finished) reduces
* the race considerably, but does not solve it. */
if (side == C_SYNC_SOURCE)
schedule_timeout_interruptible(
mdev->tconn->net_conf->ping_int * HZ +
mdev->tconn->net_conf->ping_timeo*HZ/9);
if (side == C_SYNC_SOURCE) {
struct net_conf *nc;
int timeo;
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
drbd_resync_finished(mdev);
}
@@ -1645,22 +1651,30 @@ int drbd_worker(struct drbd_thread *thi)
struct drbd_tconn *tconn = thi->tconn;
struct drbd_work *w = NULL;
struct drbd_conf *mdev;
struct net_conf *nc;
LIST_HEAD(work_list);
int vnr, intr = 0;
int cork;
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
if (down_trylock(&tconn->data.work.s)) {
mutex_lock(&tconn->data.mutex);
if (tconn->data.socket && !tconn->net_conf->no_cork)
rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
cork = nc ? !nc->no_cork : 0;
rcu_read_unlock();
if (tconn->data.socket && cork)
drbd_tcp_uncork(tconn->data.socket);
mutex_unlock(&tconn->data.mutex);
intr = down_interruptible(&tconn->data.work.s);
mutex_lock(&tconn->data.mutex);
if (tconn->data.socket && !tconn->net_conf->no_cork)
if (tconn->data.socket && cork)
drbd_tcp_cork(tconn->data.socket);
mutex_unlock(&tconn->data.mutex);
}
......