Commit 368d3cb4 authored by Yunsheng Lin, committed by Jakub Kicinski

page_pool: fix inconsistency for page_pool_ring_[un]lock()

page_pool_ring_[un]lock() use in_softirq() to decide which
spin lock variant to use. When they are called in a context
where in_softirq() is initially false, spin_lock_bh() is
called in page_pool_ring_lock(), but spin_unlock() is called
in page_pool_ring_unlock(): spin_lock_bh() disables softirqs,
so the in_softirq() re-check in the unlock path returns true.
This mismatched lock/unlock pairing leaves softirqs disabled
after the unlock.
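
To illustrate (a minimal sketch, not part of the patch,
assuming the old helpers below are called from process
context):

	/* process context: in_softirq() is false at this point */
	page_pool_ring_lock(pool);	/* takes the spin_lock_bh() branch:
					 * locks the ring AND disables
					 * softirqs */

	/* ... produce pages into pool->ring ... */

	page_pool_ring_unlock(pool);	/* in_softirq() is now true (softirqs
					 * are disabled), so the plain
					 * spin_unlock() branch runs and
					 * softirqs are never re-enabled */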

This patch fixes it by returning the in_softirq state from
page_pool_producer_lock(), and using it to decide which
spin lock variant to use in page_pool_producer_unlock().

As pool->ring has both producer and consumer locks, rename
the helpers to page_pool_producer_[un]lock() to reflect
their actual usage. Also move them to page_pool.c, as they
are only used there, and drop the 'inline' keyword, as the
compiler is in a better position to decide whether to inline.
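
The resulting calling pattern, mirroring the
page_pool_put_page_bulk() hunk in the diff below:

	bool in_softirq;

	in_softirq = page_pool_producer_lock(pool);	/* records the
							 * pre-lock context */
	/* ... __ptr_ring_produce() loop ... */
	page_pool_producer_unlock(pool, in_softirq);	/* unlocks with the
							 * variant matching
							 * the lock taken */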

Fixes: 78862447 ("net: page_pool: Add bulk support for ptr_ring")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/r/20230522031714.5089-1-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3632679d
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -399,22 +399,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 		page_pool_update_nid(pool, new_nid);
 }
 
-static inline void page_pool_ring_lock(struct page_pool *pool)
-	__acquires(&pool->ring.producer_lock)
-{
-	if (in_softirq())
-		spin_lock(&pool->ring.producer_lock);
-	else
-		spin_lock_bh(&pool->ring.producer_lock);
-}
-
-static inline void page_pool_ring_unlock(struct page_pool *pool)
-	__releases(&pool->ring.producer_lock)
-{
-	if (in_softirq())
-		spin_unlock(&pool->ring.producer_lock);
-	else
-		spin_unlock_bh(&pool->ring.producer_lock);
-}
-
 #endif /* _NET_PAGE_POOL_H */
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -134,6 +134,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
 #define recycle_stat_add(pool, __stat, val)
 #endif
 
+static bool page_pool_producer_lock(struct page_pool *pool)
+	__acquires(&pool->ring.producer_lock)
+{
+	bool in_softirq = in_softirq();
+
+	if (in_softirq)
+		spin_lock(&pool->ring.producer_lock);
+	else
+		spin_lock_bh(&pool->ring.producer_lock);
+
+	return in_softirq;
+}
+
+static void page_pool_producer_unlock(struct page_pool *pool,
+				      bool in_softirq)
+	__releases(&pool->ring.producer_lock)
+{
+	if (in_softirq)
+		spin_unlock(&pool->ring.producer_lock);
+	else
+		spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -617,6 +640,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count)
 {
 	int i, bulk_len = 0;
+	bool in_softirq;
 
 	for (i = 0; i < count; i++) {
 		struct page *page = virt_to_head_page(data[i]);
@@ -635,7 +659,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 		return;
 
 	/* Bulk producer into ptr_ring page_pool cache */
-	page_pool_ring_lock(pool);
+	in_softirq = page_pool_producer_lock(pool);
 	for (i = 0; i < bulk_len; i++) {
 		if (__ptr_ring_produce(&pool->ring, data[i])) {
 			/* ring full */
@@ -644,7 +668,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 		}
 	}
 	recycle_stat_add(pool, ring, i);
-	page_pool_ring_unlock(pool);
+	page_pool_producer_unlock(pool, in_softirq);
 
 	/* Hopefully all pages was return into ptr_ring */
 	if (likely(i == bulk_len))