Commit a21e9298 authored by Philipp Reisner

drbd: Moved the mdev member into drbd_work (from drbd_request and drbd_peer_request)

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 360cc740
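
The net effect of the patch, as an abbreviated before/after sketch (field lists shortened; the full layouts are in the drbd_int.h hunk below):

	/* Before: each embedding structure carried its own device pointer. */
	struct drbd_work         { struct list_head list; drbd_work_cb cb; };
	struct drbd_request      { struct drbd_work w; struct drbd_conf *mdev; /* ... */ };
	struct drbd_peer_request { struct drbd_work w; struct drbd_epoch *epoch; struct drbd_conf *mdev; /* ... */ };

	/* After: the common work descriptor owns the pointer, so accesses
	 * change from req->mdev to req->w.mdev (and peer_req->w.mdev). */
	struct drbd_work         { struct list_head list; drbd_work_cb cb; struct drbd_conf *mdev; };
	struct drbd_request      { struct drbd_work w; /* ... */ };
	struct drbd_peer_request { struct drbd_work w; struct drbd_epoch *epoch; /* ... */ };
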
@@ -228,6 +228,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
 		al_work.enr = enr;
 		al_work.old_enr = al_ext->lc_number;
 		al_work.w.cb = w_al_write_transaction;
+		al_work.w.mdev = mdev;
 		drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
 		wait_for_completion(&al_work.event);
@@ -717,6 +718,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 		if (udw) {
 			udw->enr = ext->lce.lc_number;
 			udw->w.cb = w_update_odbm;
+			udw->w.mdev = mdev;
 			drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
 		} else {
 			dev_warn(DEV, "Could not kmalloc an udw\n");
...
@@ -645,13 +645,13 @@ typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
 struct drbd_work {
 	struct list_head list;
 	drbd_work_cb cb;
+	struct drbd_conf *mdev;
 };

 #include "drbd_interval.h"

 struct drbd_request {
 	struct drbd_work w;
-	struct drbd_conf *mdev;

 	/* if local IO is not allowed, will be NULL.
 	 * if local IO _is_ allowed, holds the locally submitted bio clone,
@@ -715,7 +715,6 @@ struct digest_info {
 struct drbd_peer_request {
 	struct drbd_work w;
 	struct drbd_epoch *epoch; /* for writes */
-	struct drbd_conf *mdev;
 	struct page *pages;
 	atomic_t pending_bios;
 	struct drbd_interval i;
@@ -1537,7 +1536,7 @@ extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
 				     struct list_head *head);
 extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void drbd_flush_workqueue(struct drbd_tconn *tconn);
+extern void drbd_flush_workqueue(struct drbd_conf *mdev);

 /* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
  * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
...
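
With the pointer on struct drbd_work itself, code handed a bare work item can find its device without knowing the embedding type; the call sites below stamp w->mdev next to w->cb for exactly this reason. A minimal sketch of a consumer (the callback name and body are hypothetical; only the drbd_work_cb signature above is from the patch):

	/* Hypothetical callback: recovers the device from the generic
	 * work item instead of relying on the embedding structure. */
	static int w_example(struct drbd_conf *unused, struct drbd_work *w, int cancel)
	{
		struct drbd_conf *mdev = w->mdev;	/* set when the work was queued */

		if (cancel)
			return 1;
		/* ... per-device work using mdev ... */
		return 1;	/* assumed success convention for these callbacks */
	}
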
@@ -1836,6 +1836,14 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	mdev->md_sync_work.cb = w_md_sync;
 	mdev->bm_io_work.w.cb = w_bitmap_io;
 	mdev->start_resync_work.cb = w_start_resync;
+
+	mdev->resync_work.mdev = mdev;
+	mdev->unplug_work.mdev = mdev;
+	mdev->go_diskless.mdev = mdev;
+	mdev->md_sync_work.mdev = mdev;
+	mdev->bm_io_work.w.mdev = mdev;
+	mdev->start_resync_work.mdev = mdev;
+
 	init_timer(&mdev->resync_timer);
 	init_timer(&mdev->md_sync_timer);
 	init_timer(&mdev->start_resync_timer);
...
@@ -876,7 +876,7 @@ static void drbd_reconfig_start(struct drbd_conf *mdev)
 	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
 	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
 	drbd_thread_start(&mdev->tconn->worker);
-	drbd_flush_workqueue(mdev->tconn);
+	drbd_flush_workqueue(mdev);
 }

 /* if still unconfigured, stops worker again.
@@ -1076,7 +1076,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* also wait for the last barrier ack. */
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
 	/* and for any other previously queued work */
-	drbd_flush_workqueue(mdev->tconn);
+	drbd_flush_workqueue(mdev);

 	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
 	retcode = rv;  /* FIXME: Type mismatch. */
@@ -1520,7 +1520,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 		}
 	}

-	drbd_flush_workqueue(mdev->tconn);
+	drbd_flush_workqueue(mdev);

 	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->tconn->net_conf != NULL) {
 		retcode = ERR_NET_CONFIGURED;
...
@@ -345,7 +345,7 @@ drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
 	peer_req->i.waiting = false;

 	peer_req->epoch = NULL;
-	peer_req->mdev = mdev;
+	peer_req->w.mdev = mdev;
 	peer_req->pages = page;
 	atomic_set(&peer_req->pending_bios, 0);
 	peer_req->flags = 0;
@@ -3820,13 +3820,14 @@ static void drbdd(struct drbd_tconn *tconn)
 	}
 }

-void drbd_flush_workqueue(struct drbd_tconn *tconn)
+void drbd_flush_workqueue(struct drbd_conf *mdev)
 {
 	struct drbd_wq_barrier barr;

 	barr.w.cb = w_prev_work_done;
+	barr.w.mdev = mdev;
 	init_completion(&barr.done);
-	drbd_queue_work(&tconn->data.work, &barr.w);
+	drbd_queue_work(&mdev->tconn->data.work, &barr.w);
 	wait_for_completion(&barr.done);
 }
@@ -3906,7 +3907,7 @@ static int drbd_disconnected(int vnr, void *p, void *data)
 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
 	 * w_make_resync_request etc. which may still be on the worker queue
 	 * to be "canceled" */
-	drbd_flush_workqueue(mdev->tconn);
+	drbd_flush_workqueue(mdev);

 	/* This also does reclaim_net_ee().  If we do this too early, we might
 	 * miss some resync ee and pages.*/
@@ -4507,6 +4508,7 @@ static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
 		w = kmalloc(sizeof(*w), GFP_NOIO);
 		if (w) {
 			w->cb = w_ov_finished;
+			w->mdev = mdev;
 			drbd_queue_work_front(&mdev->tconn->data.work, w);
 		} else {
 			dev_err(DEV, "kmalloc(w) failed.");
...
@@ -67,7 +67,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 		drbd_req_make_private_bio(req, bio_src);
 		req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
-		req->mdev = mdev;
+		req->w.mdev = mdev;
 		req->master_bio = bio_src;
 		req->epoch = 0;
@@ -155,6 +155,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	b = mdev->tconn->newest_tle;
 	b->w.cb = w_send_barrier;
+	b->w.mdev = mdev;
 	/* inc_ap_pending done here, so we won't
 	 * get imbalanced on connection loss.
 	 * dec_ap_pending will be done in got_BarrierAck
@@ -192,7 +193,7 @@ void complete_master_bio(struct drbd_conf *mdev,
 static void drbd_remove_request_interval(struct rb_root *root,
 					 struct drbd_request *req)
 {
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	struct drbd_interval *i = &req->i;

 	drbd_remove_interval(root, i);
@@ -211,7 +212,7 @@ static void drbd_remove_request_interval(struct rb_root *root,
 void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned long s = req->rq_state;
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	/* only WRITES may end up here without a master bio (on barrier ack) */
 	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
@@ -294,7 +295,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
 {
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;

 	if (!is_susp(mdev->state))
 		_req_may_be_done(req, m);
@@ -315,7 +316,7 @@ static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_e
 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		struct bio_and_error *m)
 {
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	int rv = 0;

 	m->bio = NULL;
...
@@ -255,7 +255,7 @@ extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
  * outside the spinlock, e.g. when walking some list on cleanup. */
 static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 {
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	struct bio_and_error m;
 	int rv;
@@ -275,7 +275,7 @@ static inline int req_mod(struct drbd_request *req,
 		enum drbd_req_event what)
 {
 	unsigned long flags;
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	struct bio_and_error m;
 	int rv;
...
@@ -843,6 +843,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 		ascw->ns = ns;
 		ascw->flags = flags;
 		ascw->w.cb = w_after_state_ch;
+		ascw->w.mdev = mdev;
 		ascw->done = done;
 		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
 	} else {
...
@@ -83,7 +83,7 @@ void drbd_md_io_complete(struct bio *bio, int error)
 void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = peer_req->mdev;
+	struct drbd_conf *mdev = peer_req->w.mdev;

 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	mdev->read_cnt += peer_req->i.size >> 9;
@@ -103,7 +103,7 @@ void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(lo
 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_conf *mdev = peer_req->mdev;
+	struct drbd_conf *mdev = peer_req->w.mdev;
 	sector_t e_sector;
 	int do_wake;
 	u64 block_id;
@@ -155,7 +155,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 void drbd_endio_sec(struct bio *bio, int error)
 {
 	struct drbd_peer_request *peer_req = bio->bi_private;
-	struct drbd_conf *mdev = peer_req->mdev;
+	struct drbd_conf *mdev = peer_req->w.mdev;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
@@ -192,7 +192,7 @@ void drbd_endio_pri(struct bio *bio, int error)
 {
 	unsigned long flags;
 	struct drbd_request *req = bio->bi_private;
-	struct drbd_conf *mdev = req->mdev;
+	struct drbd_conf *mdev = req->w.mdev;
 	struct bio_and_error m;
 	enum drbd_req_event what;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
...
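
Note the recurring pattern across the hunks above: every site that prepares a work item for drbd_queue_work() or drbd_queue_work_front() now stamps the device pointer alongside the callback. As an illustrative template (the helper name is hypothetical; the fields and queue call are from the patch):

	/* Hypothetical helper capturing the new invariant: a work item
	 * carries both its callback and its device before it is queued. */
	static void example_queue_device_work(struct drbd_conf *mdev,
					      struct drbd_work *w, drbd_work_cb cb)
	{
		w->cb = cb;
		w->mdev = mdev;	/* new in this commit */
		drbd_queue_work(&mdev->tconn->data.work, w);
	}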