Commit 7115ac6e authored by Vlastimil Babka, committed by Linus Torvalds

mm, page_alloc: clean up pageset high and batch update

Patch series "disable pcplists during memory offline", v3.

As per the discussions [1] [2], this is an attempt to implement David's
suggestion that page isolation should disable pcplists to avoid races with
page freeing in progress.  This is done without extra checks in fast
paths, as explained in patch 9.  The repeated draining done by [2] is then
no longer needed.  Previous version (RFC) is at [3].

The RFC tried to hide pcplists disabling/enabling inside page isolation,
but it wasn't completely possible, as memory offline does not unisolate.
Michal suggested an explicit API in [4], so that's the current
implementation, and it indeed seems nicer.

Once we accept that page isolation users need to take explicit actions
around it depending on the guarantees they need, we can IMHO also accept
that the current pcplist draining can likewise be done by the callers,
which is more effective.  After all, there are only two users of page
isolation.  So patch 6 does effectively the same thing as Pavel proposed
in [5], and patch 7 implements stronger guarantees only for memory
offline.  If CMA decides to opt in to the stronger guarantee, it can be
added later.

Patches 1-5 are preparatory cleanups for pcplist disabling.

The patchset was briefly tested in QEMU to verify that memory
online/offline works, but I haven't done a stress test that would prove
that the race fixed by [2] is eliminated.

Note that patch 7 could be avoided if we instead adjusted page freeing as
shown in [6], but I believe the current implementation of disabling
pcplists is not overly complex, so I would prefer this over adding new
checks and a longer irq-disabled section to the page freeing hotpaths.

[1] https://lore.kernel.org/linux-mm/20200901124615.137200-1-pasha.tatashin@soleen.com/
[2] https://lore.kernel.org/linux-mm/20200903140032.380431-1-pasha.tatashin@soleen.com/
[3] https://lore.kernel.org/linux-mm/20200907163628.26495-1-vbabka@suse.cz/
[4] https://lore.kernel.org/linux-mm/20200909113647.GG7348@dhcp22.suse.cz/
[5] https://lore.kernel.org/linux-mm/20200904151448.100489-3-pasha.tatashin@soleen.com/
[6] https://lore.kernel.org/linux-mm/3d3b53db-aeaa-ff24-260b-36427fac9b1c@suse.cz/
[7] https://lore.kernel.org/linux-mm/20200922143712.12048-1-vbabka@suse.cz/
[8] https://lore.kernel.org/linux-mm/20201008114201.18824-1-vbabka@suse.cz/

This patch (of 7):

The updates to pcplists' high and batch values are handled by multiple
functions that make the calculations hard to follow.  Consolidate
everything to pageset_set_high_and_batch() and remove pageset_set_batch()
and pageset_set_high() wrappers.

The only special case using one of the removed wrappers was:
build_all_zonelists_init()

  setup_pageset()
    pageset_set_batch()

which was hardcoding batch as 0, so we can just open-code a call to
pageset_update() with constant parameters instead.

No functional change.
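
For illustration, here is a minimal user-space sketch of the calculation
that the consolidated pageset_set_high_and_batch() performs; the zone size,
the zone_batchsize() result, PAGE_SHIFT and the sysctl value are assumed
example numbers, and max_ul() stands in for the kernel's max() macro:

  /*
   * Minimal user-space sketch of the consolidated high/batch calculation.
   * The zone size, zone_batchsize() result, PAGE_SHIFT and sysctl value
   * below are assumed example numbers, not taken from a real system.
   */
  #include <stdio.h>

  #define PAGE_SHIFT 12                          /* assumed: 4K pages */

  static unsigned long max_ul(unsigned long a, unsigned long b)
  {
          return a > b ? a : b;                  /* stands in for the kernel's max() */
  }

  int main(void)
  {
          unsigned long managed_pages = 262144;  /* assumed: zone_managed_pages() of a 1GB zone */
          unsigned long zone_batch = 63;         /* assumed zone_batchsize() result */
          unsigned long fraction = 8;            /* percpu_pagelist_fraction sysctl; 0 means unset */
          unsigned long new_high, new_batch;

          if (fraction) {
                  /* sysctl set: high scales with the zone, batch is high/4 capped at PAGE_SHIFT * 8 */
                  new_high = managed_pages / fraction;
                  new_batch = max_ul(1UL, new_high / 4);
                  if ((new_high / 4) > (PAGE_SHIFT * 8))
                          new_batch = PAGE_SHIFT * 8;
          } else {
                  /* sysctl unset: batch from zone size, high is six batches worth of pages */
                  new_batch = zone_batch;
                  new_high = 6 * new_batch;
                  new_batch = max_ul(1UL, new_batch);
          }

          printf("high=%lu batch=%lu\n", new_high, new_batch);   /* prints high=32768 batch=96 */
          return 0;
  }

With the sysctl unset (fraction = 0), the same code would yield high = 378
and batch = 63 for the assumed zone_batchsize() of 63.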

Link: https://lkml.kernel.org/r/20201111092812.11329-1-vbabka@suse.cz
Link: https://lkml.kernel.org/r/20201111092812.11329-2-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Pankaj Gupta <pankaj.gupta@cloud.ionos.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 32a0de88
@@ -5919,7 +5919,7 @@ static void build_zonelists(pg_data_t *pgdat)
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static void setup_pageset(struct per_cpu_pageset *p);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

@@ -5987,7 +5987,7 @@ build_all_zonelists_init(void)
 	 * (a chicken-egg dilemma).
 	 */
 	for_each_possible_cpu(cpu)
-		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+		setup_pageset(&per_cpu(boot_pageset, cpu));

 	mminit_verify_zonelist();
 	cpuset_init_current_mems_allowed();
@@ -6296,12 +6296,6 @@ static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
 	pcp->batch = batch;
 }

-/* a companion to pageset_set_high() */
-static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
-{
-	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
-}
-
 static void pageset_init(struct per_cpu_pageset *p)
 {
 	struct per_cpu_pages *pcp;
@@ -6314,35 +6308,32 @@ static void pageset_init(struct per_cpu_pageset *p)
 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }

-static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p)
 {
 	pageset_init(p);
-	pageset_set_batch(p, batch);
+	pageset_update(&p->pcp, 0, 1);
 }

 /*
- * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
- * to the value high for the pageset p.
+ * Calculate and set new high and batch values for given per-cpu pageset of a
+ * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
  */
-static void pageset_set_high(struct per_cpu_pageset *p,
-				unsigned long high)
-{
-	unsigned long batch = max(1UL, high / 4);
-	if ((high / 4) > (PAGE_SHIFT * 8))
-		batch = PAGE_SHIFT * 8;
-
-	pageset_update(&p->pcp, high, batch);
-}
-
 static void pageset_set_high_and_batch(struct zone *zone,
-				       struct per_cpu_pageset *pcp)
+				       struct per_cpu_pageset *p)
 {
-	if (percpu_pagelist_fraction)
-		pageset_set_high(pcp,
-			(zone_managed_pages(zone) /
-				percpu_pagelist_fraction));
-	else
-		pageset_set_batch(pcp, zone_batchsize(zone));
+	unsigned long new_high, new_batch;
+
+	if (percpu_pagelist_fraction) {
+		new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
+		new_batch = max(1UL, new_high / 4);
+		if ((new_high / 4) > (PAGE_SHIFT * 8))
+			new_batch = PAGE_SHIFT * 8;
+	} else {
+		new_batch = zone_batchsize(zone);
+		new_high = 6 * new_batch;
+		new_batch = max(1UL, 1 * new_batch);
+	}
+
+	pageset_update(&p->pcp, new_high, new_batch);
 }

 static void __meminit zone_pageset_init(struct zone *zone, int cpu)