Commit 435f0740 authored by Lars Ellenberg, committed by Philipp Reisner

drbd: don't count sendpage()d pages only referenced by tcp as in use

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 76d2e7ec
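
Background: once an epoch entry's pages have been pushed out with sendpage() (see move_to_net_ee_or_free() below), DRBD itself is done with them, but the tcp stack may still hold a reference until the data has been sent. Before this patch such pages kept counting against pp_in_use, so they looked like page-pool pressure to the allocation side. The patch adds a second counter, pp_in_use_by_net, and moves the accounting there while only tcp still references the pages.

A minimal userspace sketch of that accounting split, using C11 atomics in place of the kernel's atomic_t; the two counter names mirror the new struct fields, while move_to_net() and pp_free() are illustrative stand-ins, not DRBD's API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pp_in_use;        /* allocated from page pool */
    static atomic_int pp_in_use_by_net; /* sendpage()d, still referenced by tcp */

    /* Pages handed to sendpage(): DRBD no longer uses them, but they
     * cannot be recycled until tcp drops its reference, so account
     * them under the network counter instead. */
    static void move_to_net(int npages)
    {
        atomic_fetch_add(&pp_in_use_by_net, npages);
        atomic_fetch_sub(&pp_in_use, npages);
    }

    /* Final release; is_net says which counter the chain was under. */
    static void pp_free(int npages, int is_net)
    {
        atomic_int *a = is_net ? &pp_in_use_by_net : &pp_in_use;
        int now = atomic_fetch_sub(a, npages) - npages;
        if (now < 0)
            fprintf(stderr, "ASSERTION FAILED: %s: %d < 0\n",
                    is_net ? "pp_in_use_by_net" : "pp_in_use", now);
    }

    int main(void)
    {
        atomic_fetch_add(&pp_in_use, 8); /* allocate an 8-page chain */
        move_to_net(8);                  /* sendpage()d; tcp holds the last ref */
        pp_free(8, 1);                   /* tcp done; release the net accounting */
        printf("%d %d\n", atomic_load(&pp_in_use),
               atomic_load(&pp_in_use_by_net)); /* 0 0 */
        return 0;
    }

The diff below implements the same split in the kernel: drbd_pp_free() picks the counter via is_net, and move_to_net_ee_or_free() migrates the counts when tcp still holds pages.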
drivers/block/drbd/drbd_int.h
@@ -1077,7 +1077,8 @@ struct drbd_conf {
 	int next_barrier_nr;
 	struct hlist_head *app_reads_hash; /* is proteced by req_lock */
 	struct list_head resync_reads;
-	atomic_t pp_in_use;
+	atomic_t pp_in_use;		/* allocated from page pool */
+	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
 	wait_queue_head_t ee_wait;
 	struct page *md_io_page;	/* one page buffer for md_io */
 	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
@@ -1555,7 +1556,10 @@ extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 						sector_t sector,
 						unsigned int data_size,
 						gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e);
+extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+		int is_net);
+#define drbd_free_ee(m,e)	drbd_free_some_ee(m, e, 0)
+#define drbd_free_net_ee(m,e)	drbd_free_some_ee(m, e, 1)
 extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
 		struct list_head *head);
 extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
drivers/block/drbd/drbd_main.c
@@ -2753,6 +2753,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	atomic_set(&mdev->net_cnt, 0);
 	atomic_set(&mdev->packet_seq, 0);
 	atomic_set(&mdev->pp_in_use, 0);
+	atomic_set(&mdev->pp_in_use_by_net, 0);
 	atomic_set(&mdev->rs_sect_in, 0);
 	atomic_set(&mdev->rs_sect_ev, 0);
drivers/block/drbd/drbd_receiver.c
@@ -241,7 +241,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	spin_unlock_irq(&mdev->req_lock);

 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_ee(mdev, e);
+		drbd_free_net_ee(mdev, e);
 }

 /**
@@ -298,9 +298,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
  * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
+static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
 {
+	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
+
 	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
 		i = page_chain_free(page);
 	else {
@@ -311,10 +313,10 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
 		drbd_pp_vacant += i;
 		spin_unlock(&drbd_pp_lock);
 	}
-	atomic_sub(i, &mdev->pp_in_use);
-	i = atomic_read(&mdev->pp_in_use);
+	i = atomic_sub_return(i, a);
 	if (i < 0)
-		dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
+		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
+			is_net ? "pp_in_use_by_net" : "pp_in_use", i);

 	wake_up(&drbd_pp_wait);
 }
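
A note on the accounting fix above: the old sequence of atomic_sub() followed by a separate atomic_read() was racy, since another context could change the counter between the two operations, so the underflow warning could print a value this path never produced. atomic_sub_return() performs the subtraction and hands back the resulting value in one atomic step, and selecting the counter through the local pointer a lets one warning string cover both cases. A userspace analogue with C11 atomics (helper names are hypothetical; atomic_fetch_sub() returns the pre-subtraction value, so subtracting once more recovers the kernel's atomic_sub_return() semantics):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Like the kernel's atomic_sub_return(): subtract and obtain the
     * post-subtraction value as one atomic operation. */
    static int sub_return(atomic_int *v, int i)
    {
        return atomic_fetch_sub(v, i) - i;
    }

    /* The racy pattern the patch removes: another thread may modify v
     * between the subtraction and the load. */
    static int sub_then_read(atomic_int *v, int i)
    {
        atomic_fetch_sub(v, i);
        return atomic_load(v);
    }

    int main(void)
    {
        atomic_int c = 5;
        printf("%d\n", sub_return(&c, 2));    /* 3, guaranteed */
        printf("%d\n", sub_then_read(&c, 2)); /* 1 here, unreliable under contention */
        return 0;
    }
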
@@ -374,11 +376,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 	return NULL;
 }

-void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
 {
 	if (e->flags & EE_HAS_DIGEST)
 		kfree(e->digest);
-	drbd_pp_free(mdev, e->pages);
+	drbd_pp_free(mdev, e->pages, is_net);
 	D_ASSERT(atomic_read(&e->pending_bios) == 0);
 	D_ASSERT(hlist_unhashed(&e->colision));
 	mempool_free(e, drbd_ee_mempool);
@@ -389,13 +391,14 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	LIST_HEAD(work_list);
 	struct drbd_epoch_entry *e, *t;
 	int count = 0;
+	int is_net = list == &mdev->net_ee;

 	spin_lock_irq(&mdev->req_lock);
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&mdev->req_lock);

 	list_for_each_entry_safe(e, t, &work_list, w.list) {
-		drbd_free_ee(mdev, e);
+		drbd_free_some_ee(mdev, e, is_net);
 		count++;
 	}
 	return count;
@@ -424,7 +427,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	spin_unlock_irq(&mdev->req_lock);

 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_ee(mdev, e);
+		drbd_free_net_ee(mdev, e);

 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
@@ -1460,7 +1463,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 		data_size -= rr;
 	}
 	kunmap(page);
-	drbd_pp_free(mdev, page);
+	drbd_pp_free(mdev, page, 0);
 	return rv;
 }
@@ -3879,6 +3882,9 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	i = drbd_release_ee(mdev, &mdev->net_ee);
 	if (i)
 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
+	i = atomic_read(&mdev->pp_in_use_by_net);
+	if (i)
+		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
 	i = atomic_read(&mdev->pp_in_use);
 	if (i)
 		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
drivers/block/drbd/drbd_worker.c
@@ -914,9 +914,13 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
 {
 	if (drbd_ee_has_active_page(e)) {
 		/* This might happen if sendpage() has not finished */
+		int i = DIV_ROUND_UP(e->size, PAGE_SIZE);
+		atomic_add(i, &mdev->pp_in_use_by_net);
+		atomic_sub(i, &mdev->pp_in_use);
 		spin_lock_irq(&mdev->req_lock);
 		list_add_tail(&e->w.list, &mdev->net_ee);
 		spin_unlock_irq(&mdev->req_lock);
+		wake_up(&drbd_pp_wait);
 	} else
 		drbd_free_ee(mdev, e);
 }
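
The hunk above is where pages migrate between the two counters: when an entry's pages are still referenced by tcp, their accounting moves from pp_in_use to pp_in_use_by_net and the entry is parked on net_ee until reclaim. The added wake_up(&drbd_pp_wait) matters because pp_in_use just dropped, so an allocator waiting on page-pool pressure may now proceed. The page count is the entry size rounded up to whole pages; a quick standalone check of that arithmetic (DIV_ROUND_UP reproduced from the kernel headers, PAGE_SIZE of 4096 assumed for the example):

    #include <assert.h>

    #define PAGE_SIZE 4096
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        assert(DIV_ROUND_UP(1, PAGE_SIZE) == 1);    /* a partial page still pins a page */
        assert(DIV_ROUND_UP(4096, PAGE_SIZE) == 1);
        assert(DIV_ROUND_UP(4097, PAGE_SIZE) == 2);
        return 0;
    }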