Commit 87eeee41 authored by Philipp Reisner

drbd: moved req_lock and transfer log from mdev to tconn

sed -i \
       -e 's/mdev->req_lock/mdev->tconn->req_lock/g' \
       -e 's/mdev->unused_spare_tle/mdev->tconn->unused_spare_tle/g' \
       -e 's/mdev->newest_tle/mdev->tconn->newest_tle/g' \
       -e 's/mdev->oldest_tle/mdev->tconn->oldest_tle/g' \
       -e 's/mdev->out_of_sequence_requests/mdev->tconn->out_of_sequence_requests/g' \
       *.[ch]
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 31890f4a
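
For orientation, a minimal sketch (not part of the commit; the field list is taken from the drbd_tconn hunk below) of what the move means: the request lock and the transfer-log pointers now live in the per-connection struct drbd_tconn, and per-device code reaches them through mdev->tconn, which is exactly the rewrite the sed script above performs.

	/* Sketch only, assuming the layout shown in the first hunk below. */
	struct drbd_tconn {
		/* ... */
		spinlock_t req_lock;
		struct drbd_tl_epoch *unused_spare_tle;	/* for pre-allocation */
		struct drbd_tl_epoch *newest_tle;
		struct drbd_tl_epoch *oldest_tle;
		struct list_head out_of_sequence_requests;
		/* ... */
	};

	/* Typical call-site change produced by the sed script: */
	spin_lock_irq(&mdev->tconn->req_lock);		/* was: &mdev->req_lock */
	/* ... work on the transfer log ... */
	spin_unlock_irq(&mdev->tconn->req_lock);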
@@ -976,6 +976,12 @@ struct drbd_tconn {			/* is a resource from the config file */
 	unsigned long last_received;	/* in jiffies, either socket */
 	unsigned int ko_count;
+	spinlock_t req_lock;
+	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
+	struct drbd_tl_epoch *newest_tle;
+	struct drbd_tl_epoch *oldest_tle;
+	struct list_head out_of_sequence_requests;
 	struct drbd_thread receiver;
 	struct drbd_thread worker;
 	struct drbd_thread asender;
@@ -1031,12 +1037,6 @@ struct drbd_conf {
 	atomic_t unacked_cnt;	/* Need to send replys for */
 	atomic_t local_cnt;	/* Waiting for local completion */
-	spinlock_t req_lock;
-	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
-	struct drbd_tl_epoch *newest_tle;
-	struct drbd_tl_epoch *oldest_tle;
-	struct list_head out_of_sequence_requests;
 	/* Interval tree of pending local requests */
 	struct rb_root read_requests;
 	struct rb_root write_requests;
@@ -1868,9 +1868,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&mdev->req_lock, flags);
+		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 		__drbd_chk_io_error_(mdev, forcedetach, where);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
+		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	}
 }
@@ -2366,11 +2366,11 @@ static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
 {
 	bool rv = false;
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	rv = may_inc_ap_bio(mdev);
 	if (rv)
 		atomic_add(count, &mdev->ap_bio_cnt);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	return rv;
 }
...
This diff is collapsed.
@@ -287,13 +287,13 @@ static int _try_outdate_peer_async(void *data)
 	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
 	   therefore we have to have the pre state change check here.
 	*/
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	ns = mdev->state;
 	if (ns.conn < C_WF_REPORT_PARAMS) {
 		ns.pdsk = nps;
 		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	return 0;
 }
@@ -884,7 +884,7 @@ static void drbd_reconfig_start(struct drbd_conf *mdev)
  * wakes potential waiters */
 static void drbd_reconfig_done(struct drbd_conf *mdev)
 {
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.disk == D_DISKLESS &&
 	    mdev->state.conn == C_STANDALONE &&
 	    mdev->state.role == R_SECONDARY) {
@@ -892,7 +892,7 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
 		drbd_thread_stop_nowait(&mdev->tconn->worker);
 	} else
 		clear_bit(CONFIG_PENDING, &mdev->flags);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	wake_up(&mdev->state_wait);
 }
@@ -909,11 +909,11 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 		return;
 	}
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.conn < C_CONNECTED)
 		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (s)
 		dev_info(DEV, "Suspended AL updates\n");
@@ -1240,7 +1240,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
 		drbd_suspend_al(mdev); /* IO is still suspended here... */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 	ns.i = os.i;
 	/* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1285,7 +1285,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (rv < SS_SUCCESS)
 		goto force_diskless_dec;
@@ -1521,10 +1521,10 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 	drbd_flush_workqueue(mdev);
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->tconn->net_conf != NULL) {
 		retcode = ERR_NET_CONFIGURED;
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto fail;
 	}
 	mdev->tconn->net_conf = new_conf;
@@ -1548,7 +1548,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	mdev->int_dig_in=int_dig_in;
 	mdev->int_dig_vv=int_dig_vv;
 	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 	reply->ret_code = retcode;
@@ -1582,10 +1582,10 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	}
 	if (dc.force) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		if (mdev->state.conn >= C_WF_CONNECTION)
 			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto done;
 	}
@@ -1917,10 +1917,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
 	while (retcode == SS_NEED_CONNECTION) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		if (mdev->state.conn < C_CONNECTED)
 			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		if (retcode != SS_NEED_CONNECTION)
 			break;
@@ -2193,10 +2193,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 			drbd_send_uuids_skip_initial_sync(mdev);
 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
 			drbd_print_uuids(mdev, "cleared bitmap UUID");
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
 					CS_VERBOSE, NULL);
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 		}
 	}
...
This diff is collapsed.
@@ -120,7 +120,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	if (test_bit(CREATE_BARRIER, &mdev->flags))
 		return;
-	b = mdev->newest_tle;
+	b = mdev->tconn->newest_tle;
 	b->w.cb = w_send_barrier;
 	/* inc_ap_pending done here, so we won't
 	 * get imbalanced on connection loss.
@@ -144,7 +144,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 	 */
 	if (mdev->state.conn >= C_CONNECTED &&
 	    (s & RQ_NET_SENT) != 0 &&
-	    req->epoch == mdev->newest_tle->br_number)
+	    req->epoch == mdev->tconn->newest_tle->br_number)
 		queue_barrier(mdev);
 	/* we need to do the conflict detection stuff,
@@ -516,10 +516,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * just after it grabs the req_lock */
 		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
-		req->epoch = mdev->newest_tle->br_number;
+		req->epoch = mdev->tconn->newest_tle->br_number;
 		/* increment size of current epoch */
-		mdev->newest_tle->n_writes++;
+		mdev->tconn->newest_tle->n_writes++;
 		/* queue work item to send data */
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -528,7 +528,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->tconn->data.work, &req->w);
 		/* close the epoch, in case it outgrew the limit */
-		if (mdev->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
+		if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
 			queue_barrier(mdev);
 		break;
@@ -693,7 +693,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			 * this is bad, because if the connection is lost now,
 			 * we won't be able to clean them up... */
 			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
-			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+			list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
 		}
 		if ((req->rq_state & RQ_NET_MASK) != 0) {
 			req->rq_state |= RQ_NET_DONE;
@@ -834,7 +834,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * spinlock, and grabbing the spinlock.
 	 * if we lost that race, we retry.  */
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
 		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
@@ -846,7 +846,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	}
 	/* GOOD, everything prepared, grab the spin_lock */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (is_susp(mdev->state)) {
 		/* If we got suspended, use the retry mechanism of
@@ -854,7 +854,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 		   bio. In the next call to drbd_make_request
 		   we sleep in inc_ap_bio() */
 		ret = 1;
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto fail_free_complete;
 	}
@@ -867,21 +867,21 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
 		if (!(local || remote)) {
 			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			goto fail_free_complete;
 		}
 	}
-	if (b && mdev->unused_spare_tle == NULL) {
-		mdev->unused_spare_tle = b;
+	if (b && mdev->tconn->unused_spare_tle == NULL) {
+		mdev->tconn->unused_spare_tle = b;
 		b = NULL;
 	}
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
 		 * while we were grabbing the spinlock */
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto allocate_barrier;
 	}
@@ -899,10 +899,10 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * barrier packet. To get the write ordering right, we only have to
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
-	if ((remote || send_oos) && mdev->unused_spare_tle &&
+	if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
 	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-		_tl_add_barrier(mdev, mdev->unused_spare_tle);
-		mdev->unused_spare_tle = NULL;
+		_tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+		mdev->tconn->unused_spare_tle = NULL;
 	} else {
 		D_ASSERT(!(remote && rw == WRITE &&
 			   test_bit(CREATE_BARRIER, &mdev->flags)));
@@ -934,7 +934,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	if (rw == WRITE && _req_conflicts(req))
 		goto fail_conflicting;
-	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+	list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
 	/* NOTE remote first: to get the concurrent write detection right,
 	 * we must register the request before start of local IO.  */
@@ -975,7 +975,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 		}
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */
 	if (local) {
@@ -1008,7 +1008,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * pretend that it was successfully served right now.
 	 */
 	_drbd_end_io_acct(mdev, req);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (remote)
 		dec_ap_pending(mdev);
 	/* THINK: do we want to fail it (-EIO), or pretend success?
@@ -1188,10 +1188,10 @@ void request_timer_fn(unsigned long data)
 	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
 		return; /* Recurring timer stopped */
-	spin_lock_irq(&mdev->req_lock);
-	le = &mdev->oldest_tle->requests;
+	spin_lock_irq(&mdev->tconn->req_lock);
+	le = &mdev->tconn->oldest_tle->requests;
 	if (list_empty(le)) {
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		mod_timer(&mdev->request_timer, jiffies + et);
 		return;
 	}
@@ -1210,5 +1210,5 @@ void request_timer_fn(unsigned long data)
 		mod_timer(&mdev->request_timer, req->start_time + et);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
@@ -305,9 +305,9 @@ static inline int req_mod(struct drbd_request *req,
 	struct bio_and_error m;
 	int rv;
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	if (m.bio)
 		complete_master_bio(mdev, &m);
...
@@ -85,14 +85,14 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	unsigned long flags = 0;
 	struct drbd_conf *mdev = e->mdev;
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	mdev->read_cnt += e->i.size >> 9;
 	list_del(&e->w.list);
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
 		__drbd_chk_io_error(mdev, false);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	drbd_queue_work(&mdev->tconn->data.work, &e->w);
 	put_ldev(mdev);
@@ -117,7 +117,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
 	block_id = e->block_id;
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	mdev->writ_cnt += e->i.size >> 9;
 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
 	list_add_tail(&e->w.list, &mdev->done_ee);
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
 		__drbd_chk_io_error(mdev, false);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	if (block_id == ID_SYNCER)
 		drbd_rs_complete_io(mdev, e_sector);
@@ -220,9 +220,9 @@ void drbd_endio_pri(struct bio *bio, int error)
 	req->private_bio = ERR_PTR(error);
 	/* not req_mod(), we need irqsave here! */
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	__req_mod(req, what, &m);
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -236,13 +236,13 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * but try to WRITE the P_DATA_REPLY to the failed location,
 	 * to give the disk the chance to relocate that block */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
 		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		return 1;
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	return w_send_read_req(mdev, w, 0);
 }
@@ -359,9 +359,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 		goto defer;
 	e->w.cb = w_e_send_csum;
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add(&e->w.list, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	atomic_add(size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
@@ -371,9 +371,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	 * because bio_add_page failed (probably broken lower level driver),
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	drbd_free_ee(mdev, e);
 defer:
@@ -793,7 +793,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	ping_peer(mdev);
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -882,7 +882,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 out_unlock:
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	put_ldev(mdev);
 out:
 	mdev->rs_total = 0;
@@ -907,9 +907,9 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
 		int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &mdev->pp_in_use_by_net);
 		atomic_sub(i, &mdev->pp_in_use);
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		list_add_tail(&e->w.list, &mdev->net_ee);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
 		drbd_free_ee(mdev, e);
@@ -1210,10 +1210,10 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * actually, this race was harmless, since we only try to send the
 	 * barrier packet here, and otherwise do nothing with the object.
 	 * but compare with the head of w_clear_epoch */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
 		cancel = 1;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (cancel)
 		return 1;
...