Commit 8410da8f authored by Philipp Reisner's avatar Philipp Reisner

drbd: Introduced tconn->cstate_mutex

In compatibility mode with old DRBDs, use that as the state_mutex
as well.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent dad20554
...@@ -919,6 +919,7 @@ struct drbd_tconn { /* is a resource from the config file */ ...@@ -919,6 +919,7 @@ struct drbd_tconn { /* is a resource from the config file */
struct list_head all_tconn; /* List of all drbd_tconn, prot by global_state_lock */ struct list_head all_tconn; /* List of all drbd_tconn, prot by global_state_lock */
struct idr volumes; /* <tconn, vnr> to mdev mapping */ struct idr volumes; /* <tconn, vnr> to mdev mapping */
enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned long flags; unsigned long flags;
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */ struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
...@@ -1080,7 +1081,8 @@ struct drbd_conf { ...@@ -1080,7 +1081,8 @@ struct drbd_conf {
unsigned long comm_bm_set; /* communicated number of set bits. */ unsigned long comm_bm_set; /* communicated number of set bits. */
struct bm_io_work bm_io_work; struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */ u64 ed_uuid; /* UUID of the exposed data */
struct mutex state_mutex; struct mutex own_state_mutex;
struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
char congestion_reason; /* Why we where congested... */ char congestion_reason; /* Why we where congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */ atomic_t rs_sect_ev; /* for submitted resync data rate, both */
......
...@@ -1801,7 +1801,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) ...@@ -1801,7 +1801,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->ap_in_flight, 0); atomic_set(&mdev->ap_in_flight, 0);
mutex_init(&mdev->md_io_mutex); mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->state_mutex); mutex_init(&mdev->own_state_mutex);
mdev->state_mutex = &mdev->own_state_mutex;
spin_lock_init(&mdev->al_lock); spin_lock_init(&mdev->al_lock);
spin_lock_init(&mdev->peer_seq_lock); spin_lock_init(&mdev->peer_seq_lock);
...@@ -2189,6 +2190,7 @@ struct drbd_tconn *drbd_new_tconn(char *name) ...@@ -2189,6 +2190,7 @@ struct drbd_tconn *drbd_new_tconn(char *name)
goto fail; goto fail;
tconn->cstate = C_STANDALONE; tconn->cstate = C_STANDALONE;
mutex_init(&tconn->cstate_mutex);
spin_lock_init(&tconn->req_lock); spin_lock_init(&tconn->req_lock);
atomic_set(&tconn->net_cnt, 0); atomic_set(&tconn->net_cnt, 0);
init_waitqueue_head(&tconn->net_cnt_wait); init_waitqueue_head(&tconn->net_cnt_wait);
......
...@@ -320,7 +320,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) ...@@ -320,7 +320,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (new_role == R_PRIMARY) if (new_role == R_PRIMARY)
request_ping(mdev->tconn); /* Detect a dead peer ASAP */ request_ping(mdev->tconn); /* Detect a dead peer ASAP */
mutex_lock(&mdev->state_mutex); mutex_lock(mdev->state_mutex);
mask.i = 0; mask.role = R_MASK; mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role; val.i = 0; val.role = new_role;
...@@ -439,7 +439,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) ...@@ -439,7 +439,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail: fail:
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
return rv; return rv;
} }
...@@ -2162,7 +2162,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl ...@@ -2162,7 +2162,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
return 0; return 0;
} }
mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(mdev)) { if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK; retcode = ERR_NO_DISK;
...@@ -2204,7 +2204,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl ...@@ -2204,7 +2204,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
out_dec: out_dec:
put_ldev(mdev); put_ldev(mdev);
out: out:
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
reply->ret_code = retcode; reply->ret_code = retcode;
return 0; return 0;
......
...@@ -753,6 +753,10 @@ static int drbd_connected(int vnr, void *p, void *data) ...@@ -753,6 +753,10 @@ static int drbd_connected(int vnr, void *p, void *data)
atomic_set(&mdev->packet_seq, 0); atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0; mdev->peer_seq = 0;
mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
&mdev->tconn->cstate_mutex :
&mdev->own_state_mutex;
ok &= drbd_send_sync_param(mdev, &mdev->sync_conf); ok &= drbd_send_sync_param(mdev, &mdev->sync_conf);
ok &= drbd_send_sizes(mdev, 0, 0); ok &= drbd_send_sizes(mdev, 0, 0);
ok &= drbd_send_uuids(mdev); ok &= drbd_send_uuids(mdev);
...@@ -760,6 +764,7 @@ static int drbd_connected(int vnr, void *p, void *data) ...@@ -760,6 +764,7 @@ static int drbd_connected(int vnr, void *p, void *data)
clear_bit(USE_DEGR_WFC_T, &mdev->flags); clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags); clear_bit(RESIZE_PENDING, &mdev->flags);
return !ok; return !ok;
} }
...@@ -3167,8 +3172,8 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd, ...@@ -3167,8 +3172,8 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
ongoing cluster wide state change is finished. That is important if ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the we are primary and are detaching from our disk. We need to see the
new disk state... */ new disk state... */
mutex_lock(&mdev->state_mutex); mutex_lock(mdev->state_mutex);
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
...@@ -3219,7 +3224,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd, ...@@ -3219,7 +3224,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
val.i = be32_to_cpu(p->val); val.i = be32_to_cpu(p->val);
if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) && if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
mutex_is_locked(&mdev->state_mutex)) { mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
return true; return true;
} }
......
...@@ -163,7 +163,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, ...@@ -163,7 +163,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
init_completion(&done); init_completion(&done);
if (f & CS_SERIALIZE) if (f & CS_SERIALIZE)
mutex_lock(&mdev->state_mutex); mutex_lock(mdev->state_mutex);
spin_lock_irqsave(&mdev->tconn->req_lock, flags); spin_lock_irqsave(&mdev->tconn->req_lock, flags);
os = mdev->state; os = mdev->state;
...@@ -215,7 +215,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, ...@@ -215,7 +215,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
abort: abort:
if (f & CS_SERIALIZE) if (f & CS_SERIALIZE)
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
return rv; return rv;
} }
......
...@@ -1538,19 +1538,19 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) ...@@ -1538,19 +1538,19 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
if (current == mdev->tconn->worker.task) { if (current == mdev->tconn->worker.task) {
/* The worker should not sleep waiting for state_mutex, /* The worker should not sleep waiting for state_mutex,
that can take long */ that can take long */
if (!mutex_trylock(&mdev->state_mutex)) { if (!mutex_trylock(mdev->state_mutex)) {
set_bit(B_RS_H_DONE, &mdev->flags); set_bit(B_RS_H_DONE, &mdev->flags);
mdev->start_resync_timer.expires = jiffies + HZ/5; mdev->start_resync_timer.expires = jiffies + HZ/5;
add_timer(&mdev->start_resync_timer); add_timer(&mdev->start_resync_timer);
return; return;
} }
} else { } else {
mutex_lock(&mdev->state_mutex); mutex_lock(mdev->state_mutex);
} }
clear_bit(B_RS_H_DONE, &mdev->flags); clear_bit(B_RS_H_DONE, &mdev->flags);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) { if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
return; return;
} }
...@@ -1639,7 +1639,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) ...@@ -1639,7 +1639,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_md_sync(mdev); drbd_md_sync(mdev);
} }
put_ldev(mdev); put_ldev(mdev);
mutex_unlock(&mdev->state_mutex); mutex_unlock(mdev->state_mutex);
} }
static int _worker_dying(int vnr, void *p, void *data) static int _worker_dying(int vnr, void *p, void *data)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment