Commit d956a048 authored by Jesper Dangaard Brouer's avatar Jesper Dangaard Brouer Committed by David S. Miller

xdp: force mem allocator removal and periodic warning

If bugs exist or are introduced later, e.g. by drivers misusing the API,
then we want to warn about the issue, so that developers notice. This patch
will generate a bit of noise in the form of a periodic pr_warn every 30 seconds.

It is not nice to have this stall warning running forever. Thus, this patch
will (after 120 attempts) force disconnect the mem id (from the rhashtable)
and free the page_pool object. This will cause fallback to put_page() as
before, which only potentially leaks DMA mappings if objects are really
stuck for this long. In that unlikely case, a WARN_ONCE should show us the
call stack.
Signed-off-by: default avatarJesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 99c07c43
...@@ -330,11 +330,27 @@ static void __page_pool_empty_ring(struct page_pool *pool) ...@@ -330,11 +330,27 @@ static void __page_pool_empty_ring(struct page_pool *pool)
} }
} }
/* Report pages still held by users when the pool is being torn down.
 * Computes the hold/release counter distance and emits a WARN with a
 * backtrace so the offending driver can be identified.
 * NOTE(review): assumes _distance() handles counter wrap-around — confirm
 * against its definition elsewhere in this file.
 */
static void __warn_in_flight(struct page_pool *pool)
{
u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
s32 distance;
/* in-flight = pages handed out (hold) minus pages returned (release) */
distance = _distance(hold_cnt, release_cnt);
/* Drivers should fix this, but only problematic when DMA is used */
WARN(1, "Still in-flight pages:%d hold:%u released:%u",
distance, hold_cnt, release_cnt);
}
void __page_pool_free(struct page_pool *pool) void __page_pool_free(struct page_pool *pool)
{ {
WARN(pool->alloc.count, "API usage violation"); WARN(pool->alloc.count, "API usage violation");
WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
WARN(!__page_pool_safe_to_destroy(pool), "still in-flight pages");
/* Can happen due to forced shutdown */
if (!__page_pool_safe_to_destroy(pool))
__warn_in_flight(pool);
ptr_ring_cleanup(&pool->ring, NULL); ptr_ring_cleanup(&pool->ring, NULL);
kfree(pool); kfree(pool);
......
...@@ -39,6 +39,9 @@ struct xdp_mem_allocator { ...@@ -39,6 +39,9 @@ struct xdp_mem_allocator {
struct rhash_head node; struct rhash_head node;
struct rcu_head rcu; struct rcu_head rcu;
struct delayed_work defer_wq; struct delayed_work defer_wq;
unsigned long defer_start;
unsigned long defer_warn;
int disconnect_cnt;
}; };
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
...@@ -95,7 +98,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) ...@@ -95,7 +98,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
kfree(xa); kfree(xa);
} }
bool __mem_id_disconnect(int id) bool __mem_id_disconnect(int id, bool force)
{ {
struct xdp_mem_allocator *xa; struct xdp_mem_allocator *xa;
bool safe_to_remove = true; bool safe_to_remove = true;
...@@ -108,29 +111,47 @@ bool __mem_id_disconnect(int id) ...@@ -108,29 +111,47 @@ bool __mem_id_disconnect(int id)
WARN(1, "Request remove non-existing id(%d), driver bug?", id); WARN(1, "Request remove non-existing id(%d), driver bug?", id);
return true; return true;
} }
xa->disconnect_cnt++;
/* Detects in-flight packet-pages for page_pool */ /* Detects in-flight packet-pages for page_pool */
if (xa->mem.type == MEM_TYPE_PAGE_POOL) if (xa->mem.type == MEM_TYPE_PAGE_POOL)
safe_to_remove = page_pool_request_shutdown(xa->page_pool); safe_to_remove = page_pool_request_shutdown(xa->page_pool);
if (safe_to_remove && /* TODO: Tracepoint will be added here in next-patch */
if ((safe_to_remove || force) &&
!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
mutex_unlock(&mem_id_lock); mutex_unlock(&mem_id_lock);
return safe_to_remove; return (safe_to_remove|force);
} }
#define DEFER_TIME (msecs_to_jiffies(1000)) #define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (30 * HZ)
#define DEFER_MAX_RETRIES 120
static void mem_id_disconnect_defer_retry(struct work_struct *wq) static void mem_id_disconnect_defer_retry(struct work_struct *wq)
{ {
struct delayed_work *dwq = to_delayed_work(wq); struct delayed_work *dwq = to_delayed_work(wq);
struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq); struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
bool force = false;
if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
force = true;
if (__mem_id_disconnect(xa->mem.id)) if (__mem_id_disconnect(xa->mem.id, force))
return; return;
/* Periodic warning */
if (time_after_eq(jiffies, xa->defer_warn)) {
int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
__func__, xa->mem.id, xa->disconnect_cnt, sec);
xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
}
/* Still not ready to be disconnected, retry later */ /* Still not ready to be disconnected, retry later */
schedule_delayed_work(&xa->defer_wq, DEFER_TIME); schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
} }
...@@ -153,7 +174,7 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) ...@@ -153,7 +174,7 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
if (id == 0) if (id == 0)
return; return;
if (__mem_id_disconnect(id)) if (__mem_id_disconnect(id, false))
return; return;
/* Could not disconnect, defer new disconnect attempt to later */ /* Could not disconnect, defer new disconnect attempt to later */
...@@ -164,6 +185,8 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) ...@@ -164,6 +185,8 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
mutex_unlock(&mem_id_lock); mutex_unlock(&mem_id_lock);
return; return;
} }
xa->defer_start = jiffies;
xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry); INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
mutex_unlock(&mem_id_lock); mutex_unlock(&mem_id_lock);
...@@ -388,10 +411,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, ...@@ -388,10 +411,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data); page = virt_to_head_page(data);
if (xa) { if (likely(xa)) {
napi_direct &= !xdp_return_frame_no_direct(); napi_direct &= !xdp_return_frame_no_direct();
page_pool_put_page(xa->page_pool, page, napi_direct); page_pool_put_page(xa->page_pool, page, napi_direct);
} else { } else {
/* Hopefully stack show who to blame for late return */
WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
put_page(page); put_page(page);
} }
rcu_read_unlock(); rcu_read_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment