Commit 3967deb1 authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Rename drbd_free_ee() and variants to *_peer_req()

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 0db55363
@@ -1459,10 +1459,10 @@ extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
                                                      sector_t, unsigned int,
                                                      gfp_t) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *, struct drbd_peer_request *,
+extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
                               int);
-#define drbd_free_ee(m,e)              drbd_free_some_ee(m, e, 0)
-#define drbd_free_net_ee(m,e)          drbd_free_some_ee(m, e, 1)
+#define drbd_free_peer_req(m,e)        __drbd_free_peer_req(m, e, 0)
+#define drbd_free_net_peer_req(m,e)    __drbd_free_peer_req(m, e, 1)
 extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                     struct list_head *head);
 extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
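The header hunk above keeps a single free routine and hides its is_net argument behind two thin macros. Purely for illustration, here is a minimal, self-contained sketch of that wrapper pattern with placeholder types and a stubbed body; it is not the actual DRBD implementation:

```c
#include <stdio.h>

struct drbd_conf;                         /* opaque stand-in for mdev */
struct drbd_peer_request { int flags; };  /* reduced placeholder type */

/* One worker does the actual freeing; the is_net flag marks requests
 * coming from the "net" reclaim path (see the drbd_free_net_peer_req()
 * call sites further down in this patch). */
static void __drbd_free_peer_req(struct drbd_conf *mdev,
                                 struct drbd_peer_request *peer_req,
                                 int is_net)
{
        (void)mdev;
        printf("freeing peer_req %p, is_net=%d\n", (void *)peer_req, is_net);
}

/* The renamed wrappers, as defined in the hunk above. */
#define drbd_free_peer_req(m,e)         __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e)     __drbd_free_peer_req(m, e, 1)

int main(void)
{
        struct drbd_peer_request req = { 0 };

        drbd_free_peer_req(NULL, &req);      /* ordinary completion path */
        drbd_free_net_peer_req(NULL, &req);  /* net reclaim path */
        return 0;
}
```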
...
@@ -223,7 +223,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
        spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
-               drbd_free_net_ee(mdev, peer_req);
+               drbd_free_net_peer_req(mdev, peer_req);
 }
 
 /**
@@ -307,7 +307,7 @@ You need to hold the req_lock:
  _drbd_wait_ee_list_empty()
 
 You must not have the req_lock:
- drbd_free_ee()
+ drbd_free_peer_req()
  drbd_alloc_peer_req()
  drbd_release_ee()
  drbd_ee_fix_bhs()
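The comment block in the hunk above records which of these helpers may be called with the req_lock held. As an aside, a schematic sketch of the calling convention the surrounding hunks follow: detach entries from a shared list while holding tconn->req_lock, then free them only after dropping it. This is an illustrative fragment against the DRBD structures in this patch, not the actual drbd_receiver.c code; the net_ee source list and the wholesale list_splice_init() are assumptions made for brevity.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical caller following the rule above: list manipulation happens
 * under req_lock, drbd_free_*_peer_req() runs only after unlocking. */
static void reclaim_finished_peer_reqs(struct drbd_conf *mdev)
{
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        /* assumed source list; the real code picks entries selectively */
        list_splice_init(&mdev->net_ee, &reclaimed);
        spin_unlock_irq(&mdev->tconn->req_lock);

        /* now outside req_lock, as the comment above requires */
        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);
}
```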
@@ -362,7 +362,7 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
        return NULL;
 }
 
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
                        int is_net)
 {
        if (peer_req->flags & EE_HAS_DIGEST)
@@ -385,7 +385,7 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
        spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
-               drbd_free_some_ee(mdev, peer_req, is_net);
+               __drbd_free_peer_req(mdev, peer_req, is_net);
                count++;
        }
        return count;
@@ -412,7 +412,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
        spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
-               drbd_free_net_ee(mdev, peer_req);
+               drbd_free_net_peer_req(mdev, peer_req);
 
        /* possible callbacks here:
         * e_end_block, and e_end_resync_block, e_send_discard_write.
@@ -425,7 +425,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
                err2 = peer_req->w.cb(&peer_req->w, !!err);
                if (!err)
                        err = err2;
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
        }
 
        wake_up(&mdev->ee_wait);
@@ -1395,7 +1395,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
                }
                kunmap(page);
                if (err) {
-                       drbd_free_ee(mdev, peer_req);
+                       drbd_free_peer_req(mdev, peer_req);
                        return NULL;
                }
                ds -= len;
@@ -1406,7 +1406,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
                                (unsigned long long)sector, data_size);
-                       drbd_free_ee(mdev, peer_req);
+                       drbd_free_peer_req(mdev, peer_req);
                        return NULL;
                }
        }
@@ -1547,7 +1547,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
        list_del(&peer_req->w.list);
        spin_unlock_irq(&mdev->tconn->req_lock);
 
-       drbd_free_ee(mdev, peer_req);
+       drbd_free_peer_req(mdev, peer_req);
 fail:
        put_ldev(mdev);
        return -EIO;
@@ -2109,7 +2109,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 out_interrupted:
        drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
        put_ldev(mdev);
-       drbd_free_ee(mdev, peer_req);
+       drbd_free_peer_req(mdev, peer_req);
        return err;
 }
@@ -2364,7 +2364,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
 out_free_e:
        put_ldev(mdev);
-       drbd_free_ee(mdev, peer_req);
+       drbd_free_peer_req(mdev, peer_req);
        return -EIO;
 }
...
@@ -319,7 +319,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
                 * some distributed deadlock, if the other side blocks on
                 * congestion as well, because our receiver blocks in
                 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                peer_req = NULL;
                inc_rs_pending(mdev);
                err = drbd_send_drequest_csum(mdev, sector, size,
@@ -333,7 +333,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 out:
        if (peer_req)
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
 
        if (unlikely(err))
                dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -376,7 +376,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
        list_del(&peer_req->w.list);
        spin_unlock_irq(&mdev->tconn->req_lock);
 
-       drbd_free_ee(mdev, peer_req);
+       drbd_free_peer_req(mdev, peer_req);
 defer:
        put_ldev(mdev);
        return -EAGAIN;
@@ -900,7 +900,7 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
                spin_unlock_irq(&mdev->tconn->req_lock);
                wake_up(&drbd_pp_wait);
        } else
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
 }
 
 /**
@@ -916,7 +916,7 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
        int err;
 
        if (unlikely(cancel)) {
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                dec_unacked(mdev);
                return 0;
        }
@@ -953,7 +953,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
        int err;
 
        if (unlikely(cancel)) {
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                dec_unacked(mdev);
                return 0;
        }
@@ -1005,7 +1005,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
        int err, eq = 0;
 
        if (unlikely(cancel)) {
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                dec_unacked(mdev);
                return 0;
        }
@@ -1088,7 +1088,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
                 * some distributed deadlock, if the other side blocks on
                 * congestion as well, because our receiver blocks in
                 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                peer_req = NULL;
                inc_rs_pending(mdev);
                err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
@@ -1098,7 +1098,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 out:
        if (peer_req)
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
        dec_unacked(mdev);
        return err;
 }
@@ -1126,7 +1126,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
        int err, eq = 0;
 
        if (unlikely(cancel)) {
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                dec_unacked(mdev);
                return 0;
        }
@@ -1157,7 +1157,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
                 * some distributed deadlock, if the other side blocks on
                 * congestion as well, because our receiver blocks in
                 * drbd_pp_alloc due to pp_in_use > max_buffers. */
-               drbd_free_ee(mdev, peer_req);
+               drbd_free_peer_req(mdev, peer_req);
                if (!eq)
                        drbd_ov_out_of_sync_found(mdev, sector, size);
                else
...