Commit 554fef1c authored by Dennis Zhou, committed by Tejun Heo

percpu: allow select gfp to be passed to underlying allocators

The prior patch added support for passing gfp flags through to the
underlying allocators. This patch allows users to pass along gfp flags
(currently only __GFP_NORETRY and __GFP_NOWARN) to the underlying
allocators. This should allow users to decide whether they are ok with
an allocation failing and to recover in a more graceful way.

Additionally, the previous patch treated the passed gfp as additional
flags on top of a default. Change this to caller-passed semantics:
GFP_KERNEL is no longer applied as a default flag, though it continues
to be used for internally triggered underlying percpu allocations such
as the balance work item.
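
As a minimal illustration of the caller-passed semantics (the
alloc_counters() helper below is hypothetical and not part of this
patch), a user opts into the whitelisted flags through the existing
alloc_percpu_gfp() interface and now passes GFP_KERNEL explicitly:

    #include <linux/percpu.h>
    #include <linux/gfp.h>

    /*
     * Fail fast and stay quiet under memory pressure: __GFP_NORETRY and
     * __GFP_NOWARN are forwarded to the backing allocators, and the
     * caller handles a NULL return instead of relying on retries.
     */
    static u64 __percpu *alloc_counters(void)
    {
            return alloc_percpu_gfp(u64,
                            GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
    }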

V2:
Removed gfp_percpu_mask in favor of doing it inline.
Removed GFP_KERNEL as a default flag for __alloc_percpu_gfp.
Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 47504ee0
@@ -56,7 +56,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 	if (!chunk)
 		return NULL;
 
-	pages = alloc_pages(gfp | GFP_KERNEL, order_base_2(nr_pages));
+	pages = alloc_pages(gfp, order_base_2(nr_pages));
 	if (!pages) {
 		pcpu_free_chunk(chunk);
 		return NULL;
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
 	lockdep_assert_held(&pcpu_alloc_mutex);
 
 	if (!pages)
-		pages = pcpu_mem_zalloc(pages_size, 0);
+		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
 	return pages;
 }
@@ -86,7 +86,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu, tcpu;
 	int i;
 
-	gfp |= GFP_KERNEL | __GFP_HIGHMEM;
+	gfp |= __GFP_HIGHMEM;
 
 	for_each_possible_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
@@ -454,9 +454,6 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
  * This is to facilitate passing through whitelisted flags. The
  * returned memory is always zeroed.
  *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
@@ -466,10 +463,9 @@ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 		return NULL;
 
 	if (size <= PAGE_SIZE)
-		return kzalloc(size, gfp | GFP_KERNEL);
+		return kzalloc(size, gfp);
 	else
-		return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
-				 PAGE_KERNEL);
+		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
 }
 
 /**
@@ -1344,6 +1340,8 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	/* whitelisted flags that can be passed to the backing allocators */
+	gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
@@ -1426,7 +1424,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	}
 
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
-		chunk = pcpu_create_chunk(0);
+		chunk = pcpu_create_chunk(pcpu_gfp);
 		if (!chunk) {
 			err = "failed to allocate new chunk";
 			goto fail;
@@ -1455,7 +1453,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 					   page_start, page_end) {
 			WARN_ON(chunk->immutable);
 
-			ret = pcpu_populate_chunk(chunk, rs, re, 0);
+			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
 
 			spin_lock_irqsave(&pcpu_lock, flags);
 			if (ret) {
@@ -1576,7 +1574,7 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 static void pcpu_balance_workfn(struct work_struct *work)
 {
 	/* gfp flags passed to underlying allocators */
-	const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 	LIST_HEAD(to_free);
 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;